repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
saltstack/salt
salt/modules/mac_service.py
enabled
python
def enabled(name, runas=None):
    '''
    Check if the specified service is enabled

    :param str name: The name of the service to look up

    :param str runas: User to run launchctl commands

    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # A service that launchctl cannot list is considered not enabled;
    # list_ raises CommandExecutionError in that case.
    try:
        list_(name=name, runas=runas)
    except CommandExecutionError:
        return False
    return True
Check if the specified service is enabled :param str name: The name of the service to look up :param str runas: User to run launchctl commands :return: True if the specified service enabled, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' service.enabled org.cups.cupsd
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L571-L593
[ "def list_(name=None, runas=None):\n '''\n Run launchctl list and return the output\n\n :param str name: The name of the service to list\n\n :param str runas: User to run launchctl commands\n\n :return: If a name is passed returns information about the named service,\n otherwise returns a list of all services and pids\n :rtype: str\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.list\n salt '*' service.list org.cups.cupsd\n '''\n if name:\n # Get service information and label\n service = _get_service(name)\n label = service['plist']['Label']\n\n # we can assume if we are trying to list a LaunchAgent we need\n # to run as a user, if not provided, we'll use the console user.\n if not runas and _launch_agent(name):\n runas = __utils__['mac_utils.console_user'](username=True)\n\n # Collect information on service: will raise an error if it fails\n return launchctl('list',\n label,\n return_stdout=True,\n runas=runas)\n\n # Collect information on all services: will raise an error if it fails\n return launchctl('list',\n return_stdout=True,\n runas=runas)\n" ]
# -*- coding: utf-8 -*- ''' The service module for macOS .. versionadded:: 2016.3.0 This module has support for services in the following locations. .. code-block:: bash /System/Library/LaunchDaemons/ /System/Library/LaunchAgents/ /Library/LaunchDaemons/ /Library/LaunchAgents/ # As of version "2019.2.0" support for user-specific services were added. /Users/foo/Library/LaunchAgents/ .. note:: As of the 2019.2.0 release, if a service is located in a ``LaunchAgent`` path and a ``runas`` user is NOT specified, the current console user will be used to properly interact with the service. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os import re # Import salt libs import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd party libs from salt.ext import six # Define the module's virtual name __virtualname__ = 'service' __func_alias__ = { 'list_': 'list', } log = logging.getLogger(__name__) def __virtual__(): ''' Only for macOS with launchctl ''' if not salt.utils.platform.is_darwin(): return (False, 'Failed to load the mac_service module:\n' 'Only available on macOS systems.') if not salt.utils.path.which('launchctl'): return (False, 'Failed to load the mac_service module:\n' 'Required binary not found: "launchctl"') if not salt.utils.path.which('plutil'): return (False, 'Failed to load the mac_service module:\n' 'Required binary not found: "plutil"') if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'): return (False, 'Failed to load the mac_service module:\n' 'Requires macOS 10.11 or newer') return __virtualname__ def _name_in_services(name, services): ''' Checks to see if the given service is in the given services. 
:param str name: Service label, file name, or full path :param dict services: The currently available services. :return: The service information for the service, otherwise an empty dictionary :rtype: dict ''' if name in services: # Match on label return services[name] for service in six.itervalues(services): if service['file_path'].lower() == name: # Match on full path return service basename, ext = os.path.splitext(service['file_name']) if basename.lower() == name: # Match on basename return service return dict() def _get_service(name): ''' Get information about a service. If the service is not found, raise an error :param str name: Service label, file name, or full path :return: The service information for the service, otherwise an Error :rtype: dict ''' services = __utils__['mac_utils.available_services']() name = name.lower() service = _name_in_services(name, services) # if we would the service we can return it if service: return service # if we got here our service is not available, now we can check to see if # we received a cached batch of services, if not we did a fresh check # so we need to raise that the service could not be found. try: if not __context__['using_cached_services']: raise CommandExecutionError('Service not found: {0}'.format(name)) except KeyError: pass # we used a cached version to check, a service could have been made # between now and then, we should refresh our available services. services = __utils__['mac_utils.available_services'](refresh=True) # check to see if we found the service we are looking for. service = _name_in_services(name, services) if not service: # Could not find the service after refresh raise. raise CommandExecutionError('Service not found: {0}'.format(name)) # found it :) return service def _always_running_service(name): ''' Check if the service should always be running based on the KeepAlive Key in the service plist. 
:param str name: Service label, file name, or full path :return: True if the KeepAlive key is set to True, False if set to False or not set in the plist at all. :rtype: bool .. versionadded:: 2019.2.0 ''' # get all the info from the launchctl service service_info = show(name) # get the value for the KeepAlive key in service plist try: keep_alive = service_info['plist']['KeepAlive'] except KeyError: return False # check if KeepAlive is True and not just set. if isinstance(keep_alive, dict): # check for pathstate for _file, value in six.iteritems(keep_alive.get('PathState', {})): if value is True and os.path.exists(_file): return True elif value is False and not os.path.exists(_file): return True if keep_alive is True: return True return False def _get_domain_target(name, service_target=False): ''' Returns the domain/service target and path for a service. This is used to determine whether or not a service should be loaded in a user space or system space. :param str name: Service label, file name, or full path :param bool service_target: Whether to return a full service target. This is needed for the enable and disable subcommands of /bin/launchctl. Defaults to False :return: Tuple of the domain/service target and the path to the service. :rtype: tuple .. versionadded:: 2019.2.0 ''' # Get service information service = _get_service(name) # get the path to the service path = service['file_path'] # most of the time we'll be at the system level. domain_target = 'system' # check if a LaunchAgent as we should treat these differently. if 'LaunchAgents' in path: # Get the console user so we can service in the correct session uid = __utils__['mac_utils.console_user']() domain_target = 'gui/{}'.format(uid) # check to see if we need to make it a full service target. 
if service_target is True: domain_target = '{}/{}'.format(domain_target, service['plist']['Label']) return (domain_target, path) def _launch_agent(name): ''' Checks to see if the provided service is a LaunchAgent :param str name: Service label, file name, or full path :return: True if a LaunchAgent, False if not. :rtype: bool .. versionadded:: 2019.2.0 ''' # Get the path to the service. path = _get_service(name)['file_path'] if 'LaunchAgents' not in path: return False return True def show(name): ''' Show properties of a launchctl service :param str name: Service label, file name, or full path :return: The service information if the service is found :rtype: dict CLI Example: .. code-block:: bash salt '*' service.show org.cups.cupsd # service label salt '*' service.show org.cups.cupsd.plist # file name salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path ''' return _get_service(name) def launchctl(sub_cmd, *args, **kwargs): ''' Run a launchctl command and raise an error if it fails :param str sub_cmd: Sub command supplied to launchctl :param tuple args: Tuple containing additional arguments to pass to launchctl :param dict kwargs: Dictionary containing arguments to pass to ``cmd.run_all`` :param bool return_stdout: A keyword argument. If true return the stdout of the launchctl command :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or the stdout of the launchctl command if requested :rtype: bool, str CLI Example: .. code-block:: bash salt '*' service.launchctl debug org.cups.cupsd ''' return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs) def list_(name=None, runas=None): ''' Run launchctl list and return the output :param str name: The name of the service to list :param str runas: User to run launchctl commands :return: If a name is passed returns information about the named service, otherwise returns a list of all services and pids :rtype: str CLI Example: .. 
code-block:: bash salt '*' service.list salt '*' service.list org.cups.cupsd ''' if name: # Get service information and label service = _get_service(name) label = service['plist']['Label'] # we can assume if we are trying to list a LaunchAgent we need # to run as a user, if not provided, we'll use the console user. if not runas and _launch_agent(name): runas = __utils__['mac_utils.console_user'](username=True) # Collect information on service: will raise an error if it fails return launchctl('list', label, return_stdout=True, runas=runas) # Collect information on all services: will raise an error if it fails return launchctl('list', return_stdout=True, runas=runas) def enable(name, runas=None): ''' Enable a launchd service. Raises an error if the service fails to be enabled :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already enabled :rtype: bool CLI Example: .. code-block:: bash salt '*' service.enable org.cups.cupsd ''' # Get the domain target. enable requires a full <service-target> service_target = _get_domain_target(name, service_target=True)[0] # Enable the service: will raise an error if it fails return launchctl('enable', service_target, runas=runas) def disable(name, runas=None): ''' Disable a launchd service. Raises an error if the service fails to be disabled :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already disabled :rtype: bool CLI Example: .. code-block:: bash salt '*' service.disable org.cups.cupsd ''' # Get the service target. enable requires a full <service-target> service_target = _get_domain_target(name, service_target=True)[0] # disable the service: will raise an error if it fails return launchctl('disable', service_target, runas=runas) def start(name, runas=None): ''' Start a launchd service. 
Raises an error if the service fails to start .. note:: To start a service in macOS the service must be enabled first. Use ``service.enable`` to enable the service. :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already running :rtype: bool CLI Example: .. code-block:: bash salt '*' service.start org.cups.cupsd ''' # Get the domain target. domain_target, path = _get_domain_target(name) # Load (bootstrap) the service: will raise an error if it fails return launchctl('bootstrap', domain_target, path, runas=runas) def stop(name, runas=None): ''' Stop a launchd service. Raises an error if the service fails to stop .. note:: Though ``service.stop`` will unload a service in macOS, the service will start on next boot unless it is disabled. Use ``service.disable`` to disable the service :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already stopped :rtype: bool CLI Example: .. code-block:: bash salt '*' service.stop org.cups.cupsd ''' # Get the domain target. domain_target, path = _get_domain_target(name) # Stop (bootout) the service: will raise an error if it fails return launchctl('bootout', domain_target, path, runas=runas) def restart(name, runas=None): ''' Unloads and reloads a launchd service. Raises an error if the service fails to reload :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful :rtype: bool CLI Example: .. code-block:: bash salt '*' service.restart org.cups.cupsd ''' # Restart the service: will raise an error if it fails if enabled(name): stop(name, runas=runas) start(name, runas=runas) return True def status(name, sig=None, runas=None): ''' Return the status for a service. :param str name: Used to find the service from launchctl. 
Can be any part of the service name or a regex expression. :param str sig: Find the service with status.pid instead. Note that ``name`` must still be provided. :param str runas: User to run launchctl commands :return: The PID for the service if it is running, or 'loaded' if the service should not always have a PID, or otherwise an empty string :rtype: str CLI Example: .. code-block:: bash salt '*' service.status cups ''' # Find service with ps if sig: return __salt__['status.pid'](sig) try: _get_service(name) except CommandExecutionError as msg: log.error(msg) return '' if not runas and _launch_agent(name): runas = __utils__['mac_utils.console_user'](username=True) output = list_(runas=runas) # Used a string here instead of a list because that's what the linux version # of this module does pids = '' for line in output.splitlines(): if 'PID' in line: continue if re.search(name, line.split()[-1]): if line.split()[0].isdigit(): if pids: pids += '\n' pids += line.split()[0] # mac services are a little different than other platforms as they may be # set to run on intervals and may not always active with a PID. This will # return a string 'loaded' if it shouldn't always be running and is enabled. if not _always_running_service(name) and enabled(name) and not pids: return 'loaded' return pids def available(name): ''' Check that the given service is available. :param str name: The name of the service :return: True if the service is available, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' service.available com.openssh.sshd ''' try: _get_service(name) return True except CommandExecutionError: return False def missing(name): ''' The inverse of service.available Check that the given service is not available. :param str name: The name of the service :return: True if the service is not available, otherwise False :rtype: bool CLI Example: .. 
code-block:: bash salt '*' service.missing com.openssh.sshd ''' return not available(name) def disabled(name, runas=None, domain='system'): ''' Check if the specified service is not enabled. This is the opposite of ``service.enabled`` :param str name: The name to look up :param str runas: User to run launchctl commands :param str domain: domain to check for disabled services. Default is system. :return: True if the specified service is NOT enabled, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' service.disabled org.cups.cupsd ''' disabled = launchctl('print-disabled', domain, return_stdout=True, runas=runas) for service in disabled.split("\n"): if name in service: srv_name = service.split("=>")[0].split("\"")[1] status = service.split("=>")[1] if name != srv_name: pass else: return True if 'true' in status.lower() else False return False def get_all(runas=None): ''' Return a list of services that are enabled or available. Can be used to find the name of a service. :param str runas: User to run launchctl commands :return: A list of all the services available or enabled :rtype: list CLI Example: .. code-block:: bash salt '*' service.get_all ''' # Get list of enabled services enabled = get_enabled(runas=runas) # Get list of all services available = list(__utils__['mac_utils.available_services']().keys()) # Return composite list return sorted(set(enabled + available)) def get_enabled(runas=None): ''' Return a list of all services that are enabled. Can be used to find the name of a service. :param str runas: User to run launchctl commands :return: A list of all the services enabled on the system :rtype: list CLI Example: .. 
code-block:: bash salt '*' service.get_enabled ''' # Collect list of enabled services stdout = list_(runas=runas) service_lines = [line for line in stdout.splitlines()] # Construct list of enabled services enabled = [] for line in service_lines: # Skip header line if line.startswith('PID'): continue pid, status, label = line.split('\t') enabled.append(label) return sorted(set(enabled))
saltstack/salt
salt/modules/mac_service.py
disabled
python
def disabled(name, runas=None, domain='system'):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.disabled``

    :param str name: The name to look up

    :param str runas: User to run launchctl commands

    :param str domain: domain to check for disabled services. Default is system.

    :return: True if the specified service is NOT enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    # 'launchctl print-disabled <domain>' emits one line per overridden
    # service, shaped like:  "com.example.service" => true
    ret = launchctl('print-disabled',
                    domain,
                    return_stdout=True,
                    runas=runas)
    for line in ret.split("\n"):
        if name not in line:
            continue
        # Pull the quoted label and the true/false state out of the line.
        srv_name = line.split("=>")[0].split("\"")[1]
        state = line.split("=>")[1]
        # Only an exact label match counts; a substring hit on another
        # service's line is skipped.
        if name == srv_name:
            # 'true' means the service has been disabled via override.
            return 'true' in state.lower()
    return False
Check if the specified service is not enabled. This is the opposite of ``service.enabled`` :param str name: The name to look up :param str runas: User to run launchctl commands :param str domain: domain to check for disabled services. Default is system. :return: True if the specified service is NOT enabled, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' service.disabled org.cups.cupsd
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L596-L630
[ "def launchctl(sub_cmd, *args, **kwargs):\n '''\n Run a launchctl command and raise an error if it fails\n\n :param str sub_cmd: Sub command supplied to launchctl\n\n :param tuple args: Tuple containing additional arguments to pass to\n launchctl\n\n :param dict kwargs: Dictionary containing arguments to pass to\n ``cmd.run_all``\n\n :param bool return_stdout: A keyword argument. If true return the stdout\n of the launchctl command\n\n :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or\n the stdout of the launchctl command if requested\n :rtype: bool, str\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.launchctl debug org.cups.cupsd\n '''\n return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)\n" ]
# -*- coding: utf-8 -*- ''' The service module for macOS .. versionadded:: 2016.3.0 This module has support for services in the following locations. .. code-block:: bash /System/Library/LaunchDaemons/ /System/Library/LaunchAgents/ /Library/LaunchDaemons/ /Library/LaunchAgents/ # As of version "2019.2.0" support for user-specific services were added. /Users/foo/Library/LaunchAgents/ .. note:: As of the 2019.2.0 release, if a service is located in a ``LaunchAgent`` path and a ``runas`` user is NOT specified, the current console user will be used to properly interact with the service. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os import re # Import salt libs import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd party libs from salt.ext import six # Define the module's virtual name __virtualname__ = 'service' __func_alias__ = { 'list_': 'list', } log = logging.getLogger(__name__) def __virtual__(): ''' Only for macOS with launchctl ''' if not salt.utils.platform.is_darwin(): return (False, 'Failed to load the mac_service module:\n' 'Only available on macOS systems.') if not salt.utils.path.which('launchctl'): return (False, 'Failed to load the mac_service module:\n' 'Required binary not found: "launchctl"') if not salt.utils.path.which('plutil'): return (False, 'Failed to load the mac_service module:\n' 'Required binary not found: "plutil"') if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'): return (False, 'Failed to load the mac_service module:\n' 'Requires macOS 10.11 or newer') return __virtualname__ def _name_in_services(name, services): ''' Checks to see if the given service is in the given services. 
:param str name: Service label, file name, or full path :param dict services: The currently available services. :return: The service information for the service, otherwise an empty dictionary :rtype: dict ''' if name in services: # Match on label return services[name] for service in six.itervalues(services): if service['file_path'].lower() == name: # Match on full path return service basename, ext = os.path.splitext(service['file_name']) if basename.lower() == name: # Match on basename return service return dict() def _get_service(name): ''' Get information about a service. If the service is not found, raise an error :param str name: Service label, file name, or full path :return: The service information for the service, otherwise an Error :rtype: dict ''' services = __utils__['mac_utils.available_services']() name = name.lower() service = _name_in_services(name, services) # if we would the service we can return it if service: return service # if we got here our service is not available, now we can check to see if # we received a cached batch of services, if not we did a fresh check # so we need to raise that the service could not be found. try: if not __context__['using_cached_services']: raise CommandExecutionError('Service not found: {0}'.format(name)) except KeyError: pass # we used a cached version to check, a service could have been made # between now and then, we should refresh our available services. services = __utils__['mac_utils.available_services'](refresh=True) # check to see if we found the service we are looking for. service = _name_in_services(name, services) if not service: # Could not find the service after refresh raise. raise CommandExecutionError('Service not found: {0}'.format(name)) # found it :) return service def _always_running_service(name): ''' Check if the service should always be running based on the KeepAlive Key in the service plist. 
:param str name: Service label, file name, or full path :return: True if the KeepAlive key is set to True, False if set to False or not set in the plist at all. :rtype: bool .. versionadded:: 2019.2.0 ''' # get all the info from the launchctl service service_info = show(name) # get the value for the KeepAlive key in service plist try: keep_alive = service_info['plist']['KeepAlive'] except KeyError: return False # check if KeepAlive is True and not just set. if isinstance(keep_alive, dict): # check for pathstate for _file, value in six.iteritems(keep_alive.get('PathState', {})): if value is True and os.path.exists(_file): return True elif value is False and not os.path.exists(_file): return True if keep_alive is True: return True return False def _get_domain_target(name, service_target=False): ''' Returns the domain/service target and path for a service. This is used to determine whether or not a service should be loaded in a user space or system space. :param str name: Service label, file name, or full path :param bool service_target: Whether to return a full service target. This is needed for the enable and disable subcommands of /bin/launchctl. Defaults to False :return: Tuple of the domain/service target and the path to the service. :rtype: tuple .. versionadded:: 2019.2.0 ''' # Get service information service = _get_service(name) # get the path to the service path = service['file_path'] # most of the time we'll be at the system level. domain_target = 'system' # check if a LaunchAgent as we should treat these differently. if 'LaunchAgents' in path: # Get the console user so we can service in the correct session uid = __utils__['mac_utils.console_user']() domain_target = 'gui/{}'.format(uid) # check to see if we need to make it a full service target. 
if service_target is True: domain_target = '{}/{}'.format(domain_target, service['plist']['Label']) return (domain_target, path) def _launch_agent(name): ''' Checks to see if the provided service is a LaunchAgent :param str name: Service label, file name, or full path :return: True if a LaunchAgent, False if not. :rtype: bool .. versionadded:: 2019.2.0 ''' # Get the path to the service. path = _get_service(name)['file_path'] if 'LaunchAgents' not in path: return False return True def show(name): ''' Show properties of a launchctl service :param str name: Service label, file name, or full path :return: The service information if the service is found :rtype: dict CLI Example: .. code-block:: bash salt '*' service.show org.cups.cupsd # service label salt '*' service.show org.cups.cupsd.plist # file name salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path ''' return _get_service(name) def launchctl(sub_cmd, *args, **kwargs): ''' Run a launchctl command and raise an error if it fails :param str sub_cmd: Sub command supplied to launchctl :param tuple args: Tuple containing additional arguments to pass to launchctl :param dict kwargs: Dictionary containing arguments to pass to ``cmd.run_all`` :param bool return_stdout: A keyword argument. If true return the stdout of the launchctl command :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or the stdout of the launchctl command if requested :rtype: bool, str CLI Example: .. code-block:: bash salt '*' service.launchctl debug org.cups.cupsd ''' return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs) def list_(name=None, runas=None): ''' Run launchctl list and return the output :param str name: The name of the service to list :param str runas: User to run launchctl commands :return: If a name is passed returns information about the named service, otherwise returns a list of all services and pids :rtype: str CLI Example: .. 
code-block:: bash salt '*' service.list salt '*' service.list org.cups.cupsd ''' if name: # Get service information and label service = _get_service(name) label = service['plist']['Label'] # we can assume if we are trying to list a LaunchAgent we need # to run as a user, if not provided, we'll use the console user. if not runas and _launch_agent(name): runas = __utils__['mac_utils.console_user'](username=True) # Collect information on service: will raise an error if it fails return launchctl('list', label, return_stdout=True, runas=runas) # Collect information on all services: will raise an error if it fails return launchctl('list', return_stdout=True, runas=runas) def enable(name, runas=None): ''' Enable a launchd service. Raises an error if the service fails to be enabled :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already enabled :rtype: bool CLI Example: .. code-block:: bash salt '*' service.enable org.cups.cupsd ''' # Get the domain target. enable requires a full <service-target> service_target = _get_domain_target(name, service_target=True)[0] # Enable the service: will raise an error if it fails return launchctl('enable', service_target, runas=runas) def disable(name, runas=None): ''' Disable a launchd service. Raises an error if the service fails to be disabled :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already disabled :rtype: bool CLI Example: .. code-block:: bash salt '*' service.disable org.cups.cupsd ''' # Get the service target. enable requires a full <service-target> service_target = _get_domain_target(name, service_target=True)[0] # disable the service: will raise an error if it fails return launchctl('disable', service_target, runas=runas) def start(name, runas=None): ''' Start a launchd service. 
Raises an error if the service fails to start .. note:: To start a service in macOS the service must be enabled first. Use ``service.enable`` to enable the service. :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already running :rtype: bool CLI Example: .. code-block:: bash salt '*' service.start org.cups.cupsd ''' # Get the domain target. domain_target, path = _get_domain_target(name) # Load (bootstrap) the service: will raise an error if it fails return launchctl('bootstrap', domain_target, path, runas=runas) def stop(name, runas=None): ''' Stop a launchd service. Raises an error if the service fails to stop .. note:: Though ``service.stop`` will unload a service in macOS, the service will start on next boot unless it is disabled. Use ``service.disable`` to disable the service :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already stopped :rtype: bool CLI Example: .. code-block:: bash salt '*' service.stop org.cups.cupsd ''' # Get the domain target. domain_target, path = _get_domain_target(name) # Stop (bootout) the service: will raise an error if it fails return launchctl('bootout', domain_target, path, runas=runas) def restart(name, runas=None): ''' Unloads and reloads a launchd service. Raises an error if the service fails to reload :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful :rtype: bool CLI Example: .. code-block:: bash salt '*' service.restart org.cups.cupsd ''' # Restart the service: will raise an error if it fails if enabled(name): stop(name, runas=runas) start(name, runas=runas) return True def status(name, sig=None, runas=None): ''' Return the status for a service. :param str name: Used to find the service from launchctl. 
Can be any part of the service name or a regex expression. :param str sig: Find the service with status.pid instead. Note that ``name`` must still be provided. :param str runas: User to run launchctl commands :return: The PID for the service if it is running, or 'loaded' if the service should not always have a PID, or otherwise an empty string :rtype: str CLI Example: .. code-block:: bash salt '*' service.status cups ''' # Find service with ps if sig: return __salt__['status.pid'](sig) try: _get_service(name) except CommandExecutionError as msg: log.error(msg) return '' if not runas and _launch_agent(name): runas = __utils__['mac_utils.console_user'](username=True) output = list_(runas=runas) # Used a string here instead of a list because that's what the linux version # of this module does pids = '' for line in output.splitlines(): if 'PID' in line: continue if re.search(name, line.split()[-1]): if line.split()[0].isdigit(): if pids: pids += '\n' pids += line.split()[0] # mac services are a little different than other platforms as they may be # set to run on intervals and may not always active with a PID. This will # return a string 'loaded' if it shouldn't always be running and is enabled. if not _always_running_service(name) and enabled(name) and not pids: return 'loaded' return pids def available(name): ''' Check that the given service is available. :param str name: The name of the service :return: True if the service is available, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' service.available com.openssh.sshd ''' try: _get_service(name) return True except CommandExecutionError: return False def missing(name): ''' The inverse of service.available Check that the given service is not available. :param str name: The name of the service :return: True if the service is not available, otherwise False :rtype: bool CLI Example: .. 
code-block:: bash salt '*' service.missing com.openssh.sshd ''' return not available(name) def enabled(name, runas=None): ''' Check if the specified service is enabled :param str name: The name of the service to look up :param str runas: User to run launchctl commands :return: True if the specified service enabled, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' service.enabled org.cups.cupsd ''' # Try to list the service. If it can't be listed, it's not enabled try: list_(name=name, runas=runas) return True except CommandExecutionError: return False def get_all(runas=None): ''' Return a list of services that are enabled or available. Can be used to find the name of a service. :param str runas: User to run launchctl commands :return: A list of all the services available or enabled :rtype: list CLI Example: .. code-block:: bash salt '*' service.get_all ''' # Get list of enabled services enabled = get_enabled(runas=runas) # Get list of all services available = list(__utils__['mac_utils.available_services']().keys()) # Return composite list return sorted(set(enabled + available)) def get_enabled(runas=None): ''' Return a list of all services that are enabled. Can be used to find the name of a service. :param str runas: User to run launchctl commands :return: A list of all the services enabled on the system :rtype: list CLI Example: .. code-block:: bash salt '*' service.get_enabled ''' # Collect list of enabled services stdout = list_(runas=runas) service_lines = [line for line in stdout.splitlines()] # Construct list of enabled services enabled = [] for line in service_lines: # Skip header line if line.startswith('PID'): continue pid, status, label = line.split('\t') enabled.append(label) return sorted(set(enabled))
saltstack/salt
salt/modules/mac_service.py
get_all
python
def get_all(runas=None): ''' Return a list of services that are enabled or available. Can be used to find the name of a service. :param str runas: User to run launchctl commands :return: A list of all the services available or enabled :rtype: list CLI Example: .. code-block:: bash salt '*' service.get_all ''' # Get list of enabled services enabled = get_enabled(runas=runas) # Get list of all services available = list(__utils__['mac_utils.available_services']().keys()) # Return composite list return sorted(set(enabled + available))
Return a list of services that are enabled or available. Can be used to find the name of a service. :param str runas: User to run launchctl commands :return: A list of all the services available or enabled :rtype: list CLI Example: .. code-block:: bash salt '*' service.get_all
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L633-L656
[ "def get_enabled(runas=None):\n '''\n Return a list of all services that are enabled. Can be used to find the\n name of a service.\n\n :param str runas: User to run launchctl commands\n\n :return: A list of all the services enabled on the system\n :rtype: list\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.get_enabled\n '''\n # Collect list of enabled services\n stdout = list_(runas=runas)\n service_lines = [line for line in stdout.splitlines()]\n\n # Construct list of enabled services\n enabled = []\n for line in service_lines:\n # Skip header line\n if line.startswith('PID'):\n continue\n\n pid, status, label = line.split('\\t')\n enabled.append(label)\n\n return sorted(set(enabled))\n" ]
# -*- coding: utf-8 -*- ''' The service module for macOS .. versionadded:: 2016.3.0 This module has support for services in the following locations. .. code-block:: bash /System/Library/LaunchDaemons/ /System/Library/LaunchAgents/ /Library/LaunchDaemons/ /Library/LaunchAgents/ # As of version "2019.2.0" support for user-specific services were added. /Users/foo/Library/LaunchAgents/ .. note:: As of the 2019.2.0 release, if a service is located in a ``LaunchAgent`` path and a ``runas`` user is NOT specified, the current console user will be used to properly interact with the service. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os import re # Import salt libs import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd party libs from salt.ext import six # Define the module's virtual name __virtualname__ = 'service' __func_alias__ = { 'list_': 'list', } log = logging.getLogger(__name__) def __virtual__(): ''' Only for macOS with launchctl ''' if not salt.utils.platform.is_darwin(): return (False, 'Failed to load the mac_service module:\n' 'Only available on macOS systems.') if not salt.utils.path.which('launchctl'): return (False, 'Failed to load the mac_service module:\n' 'Required binary not found: "launchctl"') if not salt.utils.path.which('plutil'): return (False, 'Failed to load the mac_service module:\n' 'Required binary not found: "plutil"') if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'): return (False, 'Failed to load the mac_service module:\n' 'Requires macOS 10.11 or newer') return __virtualname__ def _name_in_services(name, services): ''' Checks to see if the given service is in the given services. 
:param str name: Service label, file name, or full path :param dict services: The currently available services. :return: The service information for the service, otherwise an empty dictionary :rtype: dict ''' if name in services: # Match on label return services[name] for service in six.itervalues(services): if service['file_path'].lower() == name: # Match on full path return service basename, ext = os.path.splitext(service['file_name']) if basename.lower() == name: # Match on basename return service return dict() def _get_service(name): ''' Get information about a service. If the service is not found, raise an error :param str name: Service label, file name, or full path :return: The service information for the service, otherwise an Error :rtype: dict ''' services = __utils__['mac_utils.available_services']() name = name.lower() service = _name_in_services(name, services) # if we would the service we can return it if service: return service # if we got here our service is not available, now we can check to see if # we received a cached batch of services, if not we did a fresh check # so we need to raise that the service could not be found. try: if not __context__['using_cached_services']: raise CommandExecutionError('Service not found: {0}'.format(name)) except KeyError: pass # we used a cached version to check, a service could have been made # between now and then, we should refresh our available services. services = __utils__['mac_utils.available_services'](refresh=True) # check to see if we found the service we are looking for. service = _name_in_services(name, services) if not service: # Could not find the service after refresh raise. raise CommandExecutionError('Service not found: {0}'.format(name)) # found it :) return service def _always_running_service(name): ''' Check if the service should always be running based on the KeepAlive Key in the service plist. 
:param str name: Service label, file name, or full path :return: True if the KeepAlive key is set to True, False if set to False or not set in the plist at all. :rtype: bool .. versionadded:: 2019.2.0 ''' # get all the info from the launchctl service service_info = show(name) # get the value for the KeepAlive key in service plist try: keep_alive = service_info['plist']['KeepAlive'] except KeyError: return False # check if KeepAlive is True and not just set. if isinstance(keep_alive, dict): # check for pathstate for _file, value in six.iteritems(keep_alive.get('PathState', {})): if value is True and os.path.exists(_file): return True elif value is False and not os.path.exists(_file): return True if keep_alive is True: return True return False def _get_domain_target(name, service_target=False): ''' Returns the domain/service target and path for a service. This is used to determine whether or not a service should be loaded in a user space or system space. :param str name: Service label, file name, or full path :param bool service_target: Whether to return a full service target. This is needed for the enable and disable subcommands of /bin/launchctl. Defaults to False :return: Tuple of the domain/service target and the path to the service. :rtype: tuple .. versionadded:: 2019.2.0 ''' # Get service information service = _get_service(name) # get the path to the service path = service['file_path'] # most of the time we'll be at the system level. domain_target = 'system' # check if a LaunchAgent as we should treat these differently. if 'LaunchAgents' in path: # Get the console user so we can service in the correct session uid = __utils__['mac_utils.console_user']() domain_target = 'gui/{}'.format(uid) # check to see if we need to make it a full service target. 
if service_target is True: domain_target = '{}/{}'.format(domain_target, service['plist']['Label']) return (domain_target, path) def _launch_agent(name): ''' Checks to see if the provided service is a LaunchAgent :param str name: Service label, file name, or full path :return: True if a LaunchAgent, False if not. :rtype: bool .. versionadded:: 2019.2.0 ''' # Get the path to the service. path = _get_service(name)['file_path'] if 'LaunchAgents' not in path: return False return True def show(name): ''' Show properties of a launchctl service :param str name: Service label, file name, or full path :return: The service information if the service is found :rtype: dict CLI Example: .. code-block:: bash salt '*' service.show org.cups.cupsd # service label salt '*' service.show org.cups.cupsd.plist # file name salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path ''' return _get_service(name) def launchctl(sub_cmd, *args, **kwargs): ''' Run a launchctl command and raise an error if it fails :param str sub_cmd: Sub command supplied to launchctl :param tuple args: Tuple containing additional arguments to pass to launchctl :param dict kwargs: Dictionary containing arguments to pass to ``cmd.run_all`` :param bool return_stdout: A keyword argument. If true return the stdout of the launchctl command :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or the stdout of the launchctl command if requested :rtype: bool, str CLI Example: .. code-block:: bash salt '*' service.launchctl debug org.cups.cupsd ''' return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs) def list_(name=None, runas=None): ''' Run launchctl list and return the output :param str name: The name of the service to list :param str runas: User to run launchctl commands :return: If a name is passed returns information about the named service, otherwise returns a list of all services and pids :rtype: str CLI Example: .. 
code-block:: bash salt '*' service.list salt '*' service.list org.cups.cupsd ''' if name: # Get service information and label service = _get_service(name) label = service['plist']['Label'] # we can assume if we are trying to list a LaunchAgent we need # to run as a user, if not provided, we'll use the console user. if not runas and _launch_agent(name): runas = __utils__['mac_utils.console_user'](username=True) # Collect information on service: will raise an error if it fails return launchctl('list', label, return_stdout=True, runas=runas) # Collect information on all services: will raise an error if it fails return launchctl('list', return_stdout=True, runas=runas) def enable(name, runas=None): ''' Enable a launchd service. Raises an error if the service fails to be enabled :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already enabled :rtype: bool CLI Example: .. code-block:: bash salt '*' service.enable org.cups.cupsd ''' # Get the domain target. enable requires a full <service-target> service_target = _get_domain_target(name, service_target=True)[0] # Enable the service: will raise an error if it fails return launchctl('enable', service_target, runas=runas) def disable(name, runas=None): ''' Disable a launchd service. Raises an error if the service fails to be disabled :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already disabled :rtype: bool CLI Example: .. code-block:: bash salt '*' service.disable org.cups.cupsd ''' # Get the service target. enable requires a full <service-target> service_target = _get_domain_target(name, service_target=True)[0] # disable the service: will raise an error if it fails return launchctl('disable', service_target, runas=runas) def start(name, runas=None): ''' Start a launchd service. 
Raises an error if the service fails to start .. note:: To start a service in macOS the service must be enabled first. Use ``service.enable`` to enable the service. :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already running :rtype: bool CLI Example: .. code-block:: bash salt '*' service.start org.cups.cupsd ''' # Get the domain target. domain_target, path = _get_domain_target(name) # Load (bootstrap) the service: will raise an error if it fails return launchctl('bootstrap', domain_target, path, runas=runas) def stop(name, runas=None): ''' Stop a launchd service. Raises an error if the service fails to stop .. note:: Though ``service.stop`` will unload a service in macOS, the service will start on next boot unless it is disabled. Use ``service.disable`` to disable the service :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful or if the service is already stopped :rtype: bool CLI Example: .. code-block:: bash salt '*' service.stop org.cups.cupsd ''' # Get the domain target. domain_target, path = _get_domain_target(name) # Stop (bootout) the service: will raise an error if it fails return launchctl('bootout', domain_target, path, runas=runas) def restart(name, runas=None): ''' Unloads and reloads a launchd service. Raises an error if the service fails to reload :param str name: Service label, file name, or full path :param str runas: User to run launchctl commands :return: ``True`` if successful :rtype: bool CLI Example: .. code-block:: bash salt '*' service.restart org.cups.cupsd ''' # Restart the service: will raise an error if it fails if enabled(name): stop(name, runas=runas) start(name, runas=runas) return True def status(name, sig=None, runas=None): ''' Return the status for a service. :param str name: Used to find the service from launchctl. 
Can be any part of the service name or a regex expression. :param str sig: Find the service with status.pid instead. Note that ``name`` must still be provided. :param str runas: User to run launchctl commands :return: The PID for the service if it is running, or 'loaded' if the service should not always have a PID, or otherwise an empty string :rtype: str CLI Example: .. code-block:: bash salt '*' service.status cups ''' # Find service with ps if sig: return __salt__['status.pid'](sig) try: _get_service(name) except CommandExecutionError as msg: log.error(msg) return '' if not runas and _launch_agent(name): runas = __utils__['mac_utils.console_user'](username=True) output = list_(runas=runas) # Used a string here instead of a list because that's what the linux version # of this module does pids = '' for line in output.splitlines(): if 'PID' in line: continue if re.search(name, line.split()[-1]): if line.split()[0].isdigit(): if pids: pids += '\n' pids += line.split()[0] # mac services are a little different than other platforms as they may be # set to run on intervals and may not always active with a PID. This will # return a string 'loaded' if it shouldn't always be running and is enabled. if not _always_running_service(name) and enabled(name) and not pids: return 'loaded' return pids def available(name): ''' Check that the given service is available. :param str name: The name of the service :return: True if the service is available, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' service.available com.openssh.sshd ''' try: _get_service(name) return True except CommandExecutionError: return False def missing(name): ''' The inverse of service.available Check that the given service is not available. :param str name: The name of the service :return: True if the service is not available, otherwise False :rtype: bool CLI Example: .. 
code-block:: bash salt '*' service.missing com.openssh.sshd ''' return not available(name) def enabled(name, runas=None): ''' Check if the specified service is enabled :param str name: The name of the service to look up :param str runas: User to run launchctl commands :return: True if the specified service enabled, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' service.enabled org.cups.cupsd ''' # Try to list the service. If it can't be listed, it's not enabled try: list_(name=name, runas=runas) return True except CommandExecutionError: return False def disabled(name, runas=None, domain='system'): ''' Check if the specified service is not enabled. This is the opposite of ``service.enabled`` :param str name: The name to look up :param str runas: User to run launchctl commands :param str domain: domain to check for disabled services. Default is system. :return: True if the specified service is NOT enabled, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' service.disabled org.cups.cupsd ''' disabled = launchctl('print-disabled', domain, return_stdout=True, runas=runas) for service in disabled.split("\n"): if name in service: srv_name = service.split("=>")[0].split("\"")[1] status = service.split("=>")[1] if name != srv_name: pass else: return True if 'true' in status.lower() else False return False def get_enabled(runas=None): ''' Return a list of all services that are enabled. Can be used to find the name of a service. :param str runas: User to run launchctl commands :return: A list of all the services enabled on the system :rtype: list CLI Example: .. 
code-block:: bash salt '*' service.get_enabled ''' # Collect list of enabled services stdout = list_(runas=runas) service_lines = [line for line in stdout.splitlines()] # Construct list of enabled services enabled = [] for line in service_lines: # Skip header line if line.startswith('PID'): continue pid, status, label = line.split('\t') enabled.append(label) return sorted(set(enabled))
saltstack/salt
salt/modules/mac_service.py
get_enabled
python
def get_enabled(runas=None): ''' Return a list of all services that are enabled. Can be used to find the name of a service. :param str runas: User to run launchctl commands :return: A list of all the services enabled on the system :rtype: list CLI Example: .. code-block:: bash salt '*' service.get_enabled ''' # Collect list of enabled services stdout = list_(runas=runas) service_lines = [line for line in stdout.splitlines()] # Construct list of enabled services enabled = [] for line in service_lines: # Skip header line if line.startswith('PID'): continue pid, status, label = line.split('\t') enabled.append(label) return sorted(set(enabled))
Return a list of all services that are enabled. Can be used to find the name of a service. :param str runas: User to run launchctl commands :return: A list of all the services enabled on the system :rtype: list CLI Example: .. code-block:: bash salt '*' service.get_enabled
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L659-L689
[ "def list_(name=None, runas=None):\n '''\n Run launchctl list and return the output\n\n :param str name: The name of the service to list\n\n :param str runas: User to run launchctl commands\n\n :return: If a name is passed returns information about the named service,\n otherwise returns a list of all services and pids\n :rtype: str\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.list\n salt '*' service.list org.cups.cupsd\n '''\n if name:\n # Get service information and label\n service = _get_service(name)\n label = service['plist']['Label']\n\n # we can assume if we are trying to list a LaunchAgent we need\n # to run as a user, if not provided, we'll use the console user.\n if not runas and _launch_agent(name):\n runas = __utils__['mac_utils.console_user'](username=True)\n\n # Collect information on service: will raise an error if it fails\n return launchctl('list',\n label,\n return_stdout=True,\n runas=runas)\n\n # Collect information on all services: will raise an error if it fails\n return launchctl('list',\n return_stdout=True,\n runas=runas)\n" ]
# -*- coding: utf-8 -*- ''' The service module for macOS .. versionadded:: 2016.3.0 This module has support for services in the following locations. .. code-block:: bash /System/Library/LaunchDaemons/ /System/Library/LaunchAgents/ /Library/LaunchDaemons/ /Library/LaunchAgents/ # As of version "2019.2.0" support for user-specific services were added. /Users/foo/Library/LaunchAgents/ .. note:: As of the 2019.2.0 release, if a service is located in a ``LaunchAgent`` path and a ``runas`` user is NOT specified, the current console user will be used to properly interact with the service. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os import re # Import salt libs import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd party libs from salt.ext import six # Define the module's virtual name __virtualname__ = 'service' __func_alias__ = { 'list_': 'list', } log = logging.getLogger(__name__) def __virtual__(): ''' Only for macOS with launchctl ''' if not salt.utils.platform.is_darwin(): return (False, 'Failed to load the mac_service module:\n' 'Only available on macOS systems.') if not salt.utils.path.which('launchctl'): return (False, 'Failed to load the mac_service module:\n' 'Required binary not found: "launchctl"') if not salt.utils.path.which('plutil'): return (False, 'Failed to load the mac_service module:\n' 'Required binary not found: "plutil"') if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'): return (False, 'Failed to load the mac_service module:\n' 'Requires macOS 10.11 or newer') return __virtualname__ def _name_in_services(name, services): ''' Checks to see if the given service is in the given services. 
:param str name: Service label, file name, or full path :param dict services: The currently available services. :return: The service information for the service, otherwise an empty dictionary :rtype: dict ''' if name in services: # Match on label return services[name] for service in six.itervalues(services): if service['file_path'].lower() == name: # Match on full path return service basename, ext = os.path.splitext(service['file_name']) if basename.lower() == name: # Match on basename return service return dict() def _get_service(name): ''' Get information about a service. If the service is not found, raise an error :param str name: Service label, file name, or full path :return: The service information for the service, otherwise an Error :rtype: dict ''' services = __utils__['mac_utils.available_services']() name = name.lower() service = _name_in_services(name, services) # if we would the service we can return it if service: return service # if we got here our service is not available, now we can check to see if # we received a cached batch of services, if not we did a fresh check # so we need to raise that the service could not be found. try: if not __context__['using_cached_services']: raise CommandExecutionError('Service not found: {0}'.format(name)) except KeyError: pass # we used a cached version to check, a service could have been made # between now and then, we should refresh our available services. services = __utils__['mac_utils.available_services'](refresh=True) # check to see if we found the service we are looking for. service = _name_in_services(name, services) if not service: # Could not find the service after refresh raise. raise CommandExecutionError('Service not found: {0}'.format(name)) # found it :) return service def _always_running_service(name): ''' Check if the service should always be running based on the KeepAlive Key in the service plist. 
:param str name: Service label, file name, or full path :return: True if the KeepAlive key is set to True, False if set to False or not set in the plist at all. :rtype: bool .. versionadded:: 2019.2.0 ''' # get all the info from the launchctl service service_info = show(name) # get the value for the KeepAlive key in service plist try: keep_alive = service_info['plist']['KeepAlive'] except KeyError: return False # check if KeepAlive is True and not just set. if isinstance(keep_alive, dict): # check for pathstate for _file, value in six.iteritems(keep_alive.get('PathState', {})): if value is True and os.path.exists(_file): return True elif value is False and not os.path.exists(_file): return True if keep_alive is True: return True return False def _get_domain_target(name, service_target=False): ''' Returns the domain/service target and path for a service. This is used to determine whether or not a service should be loaded in a user space or system space. :param str name: Service label, file name, or full path :param bool service_target: Whether to return a full service target. This is needed for the enable and disable subcommands of /bin/launchctl. Defaults to False :return: Tuple of the domain/service target and the path to the service. :rtype: tuple .. versionadded:: 2019.2.0 ''' # Get service information service = _get_service(name) # get the path to the service path = service['file_path'] # most of the time we'll be at the system level. domain_target = 'system' # check if a LaunchAgent as we should treat these differently. if 'LaunchAgents' in path: # Get the console user so we can service in the correct session uid = __utils__['mac_utils.console_user']() domain_target = 'gui/{}'.format(uid) # check to see if we need to make it a full service target. 
if service_target is True: domain_target = '{}/{}'.format(domain_target, service['plist']['Label']) return (domain_target, path) def _launch_agent(name): ''' Checks to see if the provided service is a LaunchAgent :param str name: Service label, file name, or full path :return: True if a LaunchAgent, False if not. :rtype: bool .. versionadded:: 2019.2.0 ''' # Get the path to the service. path = _get_service(name)['file_path'] if 'LaunchAgents' not in path: return False return True def show(name): ''' Show properties of a launchctl service :param str name: Service label, file name, or full path :return: The service information if the service is found :rtype: dict CLI Example: .. code-block:: bash salt '*' service.show org.cups.cupsd # service label salt '*' service.show org.cups.cupsd.plist # file name salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path ''' return _get_service(name) def launchctl(sub_cmd, *args, **kwargs): ''' Run a launchctl command and raise an error if it fails :param str sub_cmd: Sub command supplied to launchctl :param tuple args: Tuple containing additional arguments to pass to launchctl :param dict kwargs: Dictionary containing arguments to pass to ``cmd.run_all`` :param bool return_stdout: A keyword argument. If true return the stdout of the launchctl command :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or the stdout of the launchctl command if requested :rtype: bool, str CLI Example: .. code-block:: bash salt '*' service.launchctl debug org.cups.cupsd ''' return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs) def list_(name=None, runas=None): ''' Run launchctl list and return the output :param str name: The name of the service to list :param str runas: User to run launchctl commands :return: If a name is passed returns information about the named service, otherwise returns a list of all services and pids :rtype: str CLI Example: .. 
code-block:: bash

        salt '*' service.list
        salt '*' service.list org.cups.cupsd
    '''
    if name:
        # Get service information and label
        service = _get_service(name)
        label = service['plist']['Label']

        # we can assume if we are trying to list a LaunchAgent we need
        # to run as a user, if not provided, we'll use the console user.
        if not runas and _launch_agent(name):
            runas = __utils__['mac_utils.console_user'](username=True)

        # Collect information on service: will raise an error if it fails
        return launchctl('list',
                         label,
                         return_stdout=True,
                         runas=runas)

    # Collect information on all services: will raise an error if it fails
    return launchctl('list',
                     return_stdout=True,
                     runas=runas)


def enable(name, runas=None):
    '''
    Enable a launchd service. Raises an error if the service fails to be
    enabled

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already enabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable org.cups.cupsd
    '''
    # Get the domain target. enable requires a full <service-target>
    service_target = _get_domain_target(name, service_target=True)[0]

    # Enable the service: will raise an error if it fails
    return launchctl('enable', service_target, runas=runas)


def disable(name, runas=None):
    '''
    Disable a launchd service. Raises an error if the service fails to be
    disabled

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already disabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable org.cups.cupsd
    '''
    # Get the service target. enable requires a full <service-target>
    service_target = _get_domain_target(name, service_target=True)[0]

    # disable the service: will raise an error if it fails
    return launchctl('disable', service_target, runas=runas)


def start(name, runas=None):
    '''
    Start a launchd service.  Raises an error if the service fails to start

    .. note::
        To start a service in macOS the service must be enabled first. Use
        ``service.enable`` to enable the service.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already running
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.start org.cups.cupsd
    '''
    # Get the domain target.
    domain_target, path = _get_domain_target(name)

    # Load (bootstrap) the service: will raise an error if it fails
    return launchctl('bootstrap', domain_target, path, runas=runas)


def stop(name, runas=None):
    '''
    Stop a launchd service.  Raises an error if the service fails to stop

    .. note::
        Though ``service.stop`` will unload a service in macOS, the service
        will start on next boot unless it is disabled. Use ``service.disable``
        to disable the service

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already stopped
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop org.cups.cupsd
    '''
    # Get the domain target.
    domain_target, path = _get_domain_target(name)

    # Stop (bootout) the service: will raise an error if it fails
    return launchctl('bootout', domain_target, path, runas=runas)


def restart(name, runas=None):
    '''
    Unloads and reloads a launchd service.  Raises an error if the service
    fails to reload

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart org.cups.cupsd
    '''
    # Restart the service: will raise an error if it fails
    # NOTE(review): ``runas`` is not forwarded to ``enabled()`` here, so the
    # enabled check for a LaunchAgent may run as a different user — confirm.
    if enabled(name):
        stop(name, runas=runas)
    start(name, runas=runas)

    return True


def status(name, sig=None, runas=None):
    '''
    Return the status for a service.

    :param str name: Used to find the service from launchctl.  Can be any part
        of the service name or a regex expression.

    :param str sig: Find the service with status.pid instead.  Note that
        ``name`` must still be provided.

    :param str runas: User to run launchctl commands

    :return: The PID for the service if it is running, or 'loaded' if the
        service should not always have a PID, or otherwise an empty string
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.status cups
    '''
    # Find service with ps
    if sig:
        return __salt__['status.pid'](sig)

    try:
        _get_service(name)
    except CommandExecutionError as msg:
        log.error(msg)
        return ''

    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)

    output = list_(runas=runas)

    # Used a string here instead of a list because that's what the linux version
    # of this module does
    pids = ''
    for line in output.splitlines():
        if 'PID' in line:
            continue
        # ``name`` is passed straight to re.search, so it is treated as a
        # regex pattern against the label column, not a literal substring.
        if re.search(name, line.split()[-1]):
            if line.split()[0].isdigit():
                if pids:
                    pids += '\n'
                pids += line.split()[0]

    # mac services are a little different than other platforms as they may be
    # set to run on intervals and may not always active with a PID. This will
    # return a string 'loaded' if it shouldn't always be running and is enabled.
    if not _always_running_service(name) and enabled(name) and not pids:
        return 'loaded'

    return pids


def available(name):
    '''
    Check that the given service is available.

    :param str name: The name of the service

    :return: True if the service is available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.available com.openssh.sshd
    '''
    # EAFP: _get_service raises CommandExecutionError for unknown services
    try:
        _get_service(name)
        return True
    except CommandExecutionError:
        return False


def missing(name):
    '''
    The inverse of service.available
    Check that the given service is not available.

    :param str name: The name of the service

    :return: True if the service is not available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing com.openssh.sshd
    '''
    return not available(name)


def enabled(name, runas=None):
    '''
    Check if the specified service is enabled

    :param str name: The name of the service to look up

    :param str runas: User to run launchctl commands

    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # Try to list the service.  If it can't be listed, it's not enabled
    try:
        list_(name=name, runas=runas)
        return True
    except CommandExecutionError:
        return False


def disabled(name, runas=None, domain='system'):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.enabled``

    :param str name: The name to look up

    :param str runas: User to run launchctl commands

    :param str domain: domain to check for disabled services. Default is system.

    :return: True if the specified service is NOT enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    disabled = launchctl('print-disabled',
                         domain,
                         return_stdout=True,
                         runas=runas)
    for service in disabled.split("\n"):
        # NOTE(review): this is a substring match, so ``name`` may hit more
        # than one line; the exact-label check below filters false positives.
        if name in service:
            srv_name = service.split("=>")[0].split("\"")[1]
            status = service.split("=>")[1]
            if name != srv_name:
                pass
            else:
                return True if 'true' in status.lower() else False

    return False


def get_all(runas=None):
    '''
    Return a list of services that are enabled or available. Can be used to
    find the name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services available or enabled
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Get list of enabled services
    enabled = get_enabled(runas=runas)

    # Get list of all services
    available = list(__utils__['mac_utils.available_services']().keys())

    # Return composite list
    return sorted(set(enabled + available))
saltstack/salt
salt/cloud/clouds/nova.py
get_conn
python
def get_conn():
    '''
    Return a connection object (``nova.SaltNova``) for the configured
    provider.

    The required provider keys (``user``, ``tenant``, ``identity_url``,
    ``compute_region``) are remapped onto the keyword names novaclient
    expects; optional keys (``password``, ``use_keystoneauth``, ``verify``)
    are passed through when present.

    :return: An authenticated ``SaltNova`` connection object
    '''
    vm_ = get_configured_provider()

    kwargs = vm_.copy()  # pylint: disable=E1103

    kwargs['username'] = vm_['user']
    kwargs['project_id'] = vm_['tenant']
    kwargs['auth_url'] = vm_['identity_url']
    kwargs['region_name'] = vm_['compute_region']
    # Default to the legacy keystoneclient auth path when not configured.
    kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)

    if 'password' in vm_:
        kwargs['password'] = vm_['password']

    # 'verify' is only honored by the keystoneauth1 code path.  Use the
    # already-normalized kwargs value rather than vm_['use_keystoneauth'],
    # which raised KeyError when 'verify' was configured but
    # 'use_keystoneauth' was not present at all.
    if 'verify' in vm_:
        if kwargs['use_keystoneauth']:
            kwargs['verify'] = vm_['verify']
        else:
            log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')

    conn = nova.SaltNova(**kwargs)

    return conn
Return a conn object for the passed VM data
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L301-L324
[ "def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('user', 'tenant', 'identity_url', 'compute_region',)\n )\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) 
return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. 
Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
avail_locations
python
def avail_locations(conn=None, call=None):
    '''
    Return a list of locations
    '''
    # Guard: this provider function is only valid as -f/--function
    # (or via --list-locations), never as an instance action.
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    # Lazily open a connection only when the caller did not supply one.
    connection = get_conn() if conn is None else conn

    # Pull the 'compute' entry out of the Keystone service catalog and
    # index its endpoints by region name.
    compute_entry = nova.get_entry(connection.get_catalog(), 'type', 'compute')
    return {endpoint['region']: endpoint for endpoint in compute_entry['endpoints']}
Return a list of locations
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L327-L345
[ "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def get_entry(dict_, key, value, raise_error=True):\n for entry in dict_:\n if entry[key] == value:\n return entry\n if raise_error is True:\n raise SaltCloudSystemExit('Unable to find {0} in {1}.'.format(key, dict_))\n return {}\n", "def get_catalog(self):\n '''\n Return service catalog\n '''\n return self.catalog\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', 
vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. 
Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
get_image
python
def get_image(conn, vm_):
    '''
    Return the ID of the image to use for the VM, or ``None`` when no image
    is configured (boot-from-volume case).

    conn
        An authenticated nova connection (``SaltNova``) used to list and
        look up images.
    vm_
        The VM profile/config dict; the ``image`` value may be an image ID,
        an image name, or empty.

    Raises ``SaltCloudNotFound`` when a configured image cannot be resolved.
    '''
    vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode(
        'ascii', 'salt-cloud-force-ascii'
    )
    # 'salt-cloud-force-ascii' is a codec error handler registered by
    # salt.utils.cloud. On Python 3 .encode() returns bytes, which would
    # never compare equal to the unicode image ids/names below (and would
    # be the wrong type for conn.image_show), so decode back to text.
    # On Python 2 the encode result is already str, so this is a no-op.
    if not isinstance(vm_image, str):
        vm_image = vm_image.decode('ascii')

    if not vm_image:
        log.debug('No image set, must be boot from volume')
        return None

    image_list = conn.image_list()

    # The configured value may match either the image id or its name (the
    # dict key); in both cases return the canonical id.
    for img in image_list:
        if vm_image in (image_list[img]['id'], img):
            return image_list[img]['id']

    # Not in the listing; fall back to a direct lookup by id.
    try:
        image = conn.image_show(vm_image)
        return image['id']
    except novaclient.exceptions.NotFound as exc:
        raise SaltCloudNotFound(
            'The specified image, \'{0}\', could not be found: {1}'.format(
                vm_image, exc
            )
        )
Return the ID of the image to use (or None for boot-from-volume)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L348-L374
[ "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, 
__opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. 
''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
show_instance
python
def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node
Show the details from the provider concerning an instance
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L388-L400
[ "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def show_instance(self, name):\n '''\n Find a server by its name (libcloud)\n '''\n return self.server_by_name(name)\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. Compatible with IPv4. 
''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
get_size
python
def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) )
Return the VM's size object
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L403-L417
[ "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. Compatible with IPv4. 
''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
ignore_cidr
python
def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False
Return True if we are to ignore the specified IP. Compatible with IPv4.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L441-L456
[ "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. 
Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
rackconnect
python
def rackconnect(vm_):
    '''
    Determine if we should wait for rackconnect automation before running.
    Either 'False' (default) or 'True'.
    '''
    # Look the flag up in the VM profile/provider config only;
    # search_global=False deliberately skips the global cloud config.
    wait_for_rackconnect = config.get_cloud_config_value(
        'rackconnect',
        vm_,
        __opts__,
        default=False,
        search_global=False,
    )
    return wait_for_rackconnect
Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L470-L478
[ "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
rackconnectv3
python
def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False )
Determine if server is using rackconnectv3 or not Return the rackconnect network name or False
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L481-L489
[ "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
cloudnetwork
python
def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False )
Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L492-L500
[ "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
managedcloud
python
def managedcloud(vm_):
    '''
    Determine whether provisioning should wait for Rackspace Managed Cloud
    automation to finish before continuing.

    Reads the ``managedcloud`` setting for this VM from the cloud profile /
    provider configuration (not searched globally). Returns ``False`` when
    the option is unset.
    '''
    # Delegate to the standard cloud-config lookup; precedence is
    # VM config -> profile -> provider, with no global fallback.
    wait_for_automation = config.get_cloud_config_value(
        'managedcloud',
        vm_,
        __opts__,
        default=False,
        search_global=False
    )
    return wait_for_automation
Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L503-L511
[ "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
def get_block_mapping_opts(vm_):
    '''
    Collect every block-storage option for the VM profile into one dict.

    Keys returned: ``block_device_mapping`` (dict), ``block_device`` (list),
    ``ephemeral`` (list), ``swap``, ``snapshot`` and ``boot_volume`` (all
    ``None`` when unset). Values come straight from the cloud config.
    '''
    ret = {}
    ret['block_device_mapping'] = config.get_cloud_config_value(
        'block_device_mapping', vm_, __opts__, default={})
    ret['block_device'] = config.get_cloud_config_value(
        'block_device', vm_, __opts__, default=[])
    ret['ephemeral'] = config.get_cloud_config_value(
        'ephemeral', vm_, __opts__, default=[])
    ret['swap'] = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=None)
    ret['snapshot'] = config.get_cloud_config_value(
        'snapshot', vm_, __opts__, default=None)
    ret['boot_volume'] = config.get_cloud_config_value(
        'boot_volume', vm_, __opts__, default=None)
    return ret


def show_instance(name, call=None):
    '''
    Show the details from the provider concerning an instance.

    :param str name: name of the instance to look up
    :param call: must be 'action' (``-a``/``--action``) or this raises
    :return: dict of node attributes, also written to the salt-cloud node cache
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    conn = get_conn()
    node = conn.show_instance(name).__dict__
    __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
    return node


def get_size(conn, vm_):
    '''
    Return the id of the flavor (size) configured for the VM.

    Falls back to the first listed size when the profile sets none; raises
    SaltCloudNotFound when the configured size matches neither a flavor name
    nor a flavor id.
    '''
    sizes = conn.list_sizes()
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    if not vm_size:
        return sizes[0]

    for size in sizes:
        # Match either the flavor id or the flavor name, compared as text.
        if vm_size and six.text_type(vm_size) in (
                six.text_type(sizes[size]['id']), six.text_type(size)):
            return sizes[size]['id']
    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(vm_size)
    )


def preferred_ip(vm_, ips):
    '''
    Return the first address in ``ips`` valid for the configured protocol.

    The VM's ``protocol`` setting selects the family: 'ipv4' (default) or
    'ipv6'. Returns the first address that parses for that family, or
    ``False`` when none do.
    '''
    proto = config.get_cloud_config_value(
        'protocol', vm_, __opts__, default='ipv4', search_global=False
    )

    family = socket.AF_INET
    if proto == 'ipv6':
        family = socket.AF_INET6
    for ip in ips:
        try:
            socket.inet_pton(family, ip)
            return ip
        except socket.error:
            # Not a valid address for this family; try the next candidate.
            # (Was a bare ``except Exception`` which also hid real bugs.)
            continue
    return False


def ignore_cidr(vm_, ip):
    '''
    Return True if we are to ignore the specified IP.

    Checks ``ip`` against the profile's ``ignore_cidr`` setting.
    Compatible with IPv4 only (netaddr ``all_matching_cidrs``).
    '''
    if HAS_NETADDR is False:
        log.error('Error: netaddr is not installed')
        # NOTE: returns a truthy error string rather than raising, matching
        # the existing callers that only test truthiness.
        return 'Error: netaddr is not installed'

    cidr = config.get_cloud_config_value(
        'ignore_cidr', vm_, __opts__, default='', search_global=False
    )
    if cidr != '' and all_matching_cidrs(ip, [cidr]):
        log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr)
        return True

    return False
def ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips'
    (default) or 'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )


def rackconnect(vm_):
    '''
    Determine if we should wait for rackconnect automation before running.
    Either 'False' (default) or 'True'.
    '''
    return config.get_cloud_config_value(
        'rackconnect', vm_, __opts__, default=False, search_global=False
    )


def rackconnectv3(vm_):
    '''
    Determine if server is using rackconnectv3 or not.
    Return the rackconnect network name or False.
    '''
    return config.get_cloud_config_value(
        'rackconnectv3', vm_, __opts__, default=False, search_global=False
    )


def cloudnetwork(vm_):
    '''
    Determine if we should use an extra network to bootstrap.
    Either 'False' (default) or 'True'.
    '''
    return config.get_cloud_config_value(
        'cloudnetwork', vm_, __opts__, default=False, search_global=False
    )


def destroy(name, conn=None, call=None):
    '''
    Delete a single VM.

    Fires the destroying/destroyed events, optionally flushes the minion's
    Salt Mine (profile option ``flush_mine_on_destroy``), removes cached ssh
    keys and minion cachedir entries when configured, and returns True on
    successful deletion, False otherwise.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    if not conn:
        conn = get_conn()   # pylint: disable=E0602

    node = conn.server_by_name(name)
    profiles = get_configured_provider()['profiles']  # pylint: disable=E0602
    if node is None:
        log.error('Unable to find the VM %s', name)
        # BUGFIX: previously fell through and dereferenced ``node.extra``,
        # raising AttributeError on a missing VM.
        return False

    profile = None
    if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
        profile = node.extra['metadata']['profile']

    flush_mine_on_destroy = False
    if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
        flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']

    if flush_mine_on_destroy:
        log.info('Clearing Salt Mine: %s', name)
        salt_client = salt.client.get_local_client(__opts__['conf_file'])
        salt_client.cmd(name, 'mine.flush')

    log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy)
    log.info('Destroying VM: %s', name)
    ret = conn.delete(node.id)
    if ret:
        log.info('Destroyed VM: %s', name)
        # Fire destroy action
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        if __opts__.get('delete_sshkeys', False) is True:
            salt.utils.cloud.remove_sshkey(
                getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0])
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name, __active_provider_name__.split(':')[0], __opts__)
        __utils__['cloud.cachedir_index_del'](name)
        return True

    log.error('Failed to Destroy VM: %s', name)
    return False
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
def _query_node_data(vm_, data, conn):
    '''
    Poll helper for ``salt.utils.cloud.wait_for_ip``: refresh ``data`` with
    the node's current addresses.

    Returns ``data`` once a usable address set is available, ``False`` on a
    lookup error (which aborts the wait), or ``None`` to trigger another
    polling iteration (node not ACTIVE yet, automation still running, or no
    suitable IPs found yet).
    '''
    try:
        node = show_instance(vm_['name'], 'action')
        log.debug('Loaded node data for %s:\n%s',
                  vm_['name'], pprint.pformat(node))
    except Exception as err:
        # Show the traceback if the debug logging level is enabled
        log.error(
            'Failed to get nodes list: %s', err,
            exc_info_on_loglevel=logging.DEBUG
        )
        # Trigger a failure in the wait for IP function
        return False

    running = node['state'] == 'ACTIVE'
    if not running:
        # Still not running, trigger another iteration
        return

    if rackconnect(vm_) is True:
        extra = node.get('extra', {})
        rc_status = extra.get('metadata', {}).get(
            'rackconnect_automation_status', '')
        if rc_status != 'DEPLOYED':
            log.debug('Waiting for Rackconnect automation to complete')
            return

    if managedcloud(vm_) is True:
        extra = conn.server_show_libcloud(node['id']).extra
        mc_status = extra.get('metadata', {}).get(
            'rax_service_level_automation', '')
        if mc_status != 'Complete':
            log.debug('Waiting for managed cloud automation to complete')
            return

    access_ip = node.get('extra', {}).get('access_ip', '')

    rcv3 = rackconnectv3(vm_) in node['addresses']
    sshif = ssh_interface(vm_) in node['addresses']

    if any((rcv3, sshif)):
        networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_)
        for network in node['addresses'].get(networkname, []):
            # BUGFIX: was ``network['version'] is 4`` — identity comparison
            # with an int literal only works via CPython small-int caching
            # and is a SyntaxWarning on Python 3.8+.
            if network['version'] == 4:
                access_ip = network['addr']
                break
        vm_['cloudnetwork'] = True

    # Conditions to pass this
    #
    #     Rackconnect v2: vm_['rackconnect'] = True
    #         If this is True, then the server will not be accessible from
    #         the ipv4 address in public_ips. That interface gets turned off,
    #         and an ipv4 from the dedicated firewall is routed to the
    #         server. In this case we can use the private_ips for
    #         ssh_interface, or the access_ip.
    #
    #     Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork>
    #         If this is the case, salt will need to use the cloud network to
    #         login to the server. There is no ipv4 address automatically
    #         provisioned for these servers when they are booted. SaltCloud
    #         also cannot use the private_ips, because that traffic is
    #         dropped at the hypervisor.
    #
    #     CloudNetwork: vm['cloudnetwork'] = True
    #         If this is True, then we should have an access_ip at this point
    #         set to the ip on the cloud network. If that network does not
    #         exist in the 'addresses' dictionary, then SaltCloud will use
    #         the initial access_ip, and not overwrite anything.
    if (any((cloudnetwork(vm_), rackconnect(vm_)))
            and (ssh_interface(vm_) != 'private_ips' or rcv3)
            and access_ip != ''):
        data.public_ips = [access_ip]
        return data

    result = []

    if ('private_ips' not in node
            and 'public_ips' not in node
            and 'floating_ips' not in node
            and 'fixed_ips' not in node
            and 'access_ip' in node.get('extra', {})):
        result = [node['extra']['access_ip']]

    private = node.get('private_ips', [])
    public = node.get('public_ips', [])
    fixed = node.get('fixed_ips', [])
    floating = node.get('floating_ips', [])

    if private and not public:
        log.warning('Private IPs returned, but not public. '
                    'Checking for misidentified IPs')
        for private_ip in private:
            private_ip = preferred_ip(vm_, [private_ip])
            if private_ip is False:
                continue
            if salt.utils.cloud.is_public_ip(private_ip):
                log.warning('%s is a public IP', private_ip)
                data.public_ips.append(private_ip)
                log.warning('Public IP address was not ready when we last checked. '
                            'Appending public IP address now.')
                public = data.public_ips
            else:
                log.warning('%s is a private IP', private_ip)
                ignore_ip = ignore_cidr(vm_, private_ip)
                if private_ip not in data.private_ips and not ignore_ip:
                    result.append(private_ip)

    # populate return data with private_ips
    # when ssh_interface is set to private_ips and public_ips exist
    if not result and ssh_interface(vm_) == 'private_ips':
        for private_ip in private:
            ignore_ip = ignore_cidr(vm_, private_ip)
            if private_ip not in data.private_ips and not ignore_ip:
                result.append(private_ip)

    non_private_ips = []

    if public:
        data.public_ips = public
        if ssh_interface(vm_) == 'public_ips':
            non_private_ips.append(public)

    if floating:
        data.floating_ips = floating
        if ssh_interface(vm_) == 'floating_ips':
            non_private_ips.append(floating)

    if fixed:
        data.fixed_ips = fixed
        if ssh_interface(vm_) == 'fixed_ips':
            non_private_ips.append(fixed)

    if non_private_ips:
        log.debug('result = %s', non_private_ips)
        data.private_ips = result
        if ssh_interface(vm_) != 'private_ips':
            return data

    if result:
        log.debug('result = %s', result)
        data.private_ips = result
        if ssh_interface(vm_) == 'private_ips':
            return data
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
def avail_images():
    '''
    Return a dict of all available VM images on the cloud provider.
    '''
    conn = get_conn()
    return conn.image_list()


def avail_sizes():
    '''
    Return a dict of all available VM sizes on the cloud provider.
    '''
    conn = get_conn()
    return conn.flavor_list()


def list_nodes(call=None, **kwargs):
    '''
    Return a list of the VMs that are in this location.

    Each entry maps the server name to a dict with id, image, size, state
    and the classified private/public IP lists.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    ret = {}
    conn = get_conn()
    server_list = conn.server_list()

    if not server_list:
        return {}
    for server in server_list:
        server_tmp = conn.server_show(server_list[server]['id']).get(server)

        # If the server is deleted while looking it up, skip
        if server_tmp is None:
            continue

        private = []
        public = []
        if 'addresses' not in server_tmp:
            server_tmp['addresses'] = {}
        for network in server_tmp['addresses']:
            for address in server_tmp['addresses'][network]:
                if salt.utils.cloud.is_public_ip(address.get('addr', '')):
                    public.append(address['addr'])
                elif ':' in address['addr']:
                    # Bare IPv6 addresses are treated as public
                    public.append(address['addr'])
                elif '.' in address['addr']:
                    private.append(address['addr'])

        if server_tmp['accessIPv4']:
            if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']):
                public.append(server_tmp['accessIPv4'])
            else:
                private.append(server_tmp['accessIPv4'])
        if server_tmp['accessIPv6']:
            public.append(server_tmp['accessIPv6'])

        ret[server] = {
            'id': server_tmp['id'],
            # BUGFIX: boot-from-volume servers report an empty image, which
            # made ``server_tmp['image']['id']`` raise. Use the same
            # placeholder the requesting event uses for such servers.
            'image': server_tmp['image']['id'] if server_tmp.get('image')
                     else 'Boot From Volume',
            'size': server_tmp['flavor']['id'],
            'state': server_tmp['state'],
            'private_ips': private,
            'public_ips': public,
        }
    return ret
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
destroy
python
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM.

    :param str name: The name of the VM to destroy.
    :param conn: Optional existing Nova connection; created via
        ``get_conn()`` when not supplied.
    :param str call: Invocation style; must not be ``'function'``.
    :return: ``True`` if the VM was destroyed, ``False`` otherwise
        (including when the VM cannot be found).
    :raises SaltCloudSystemExit: When invoked with -f/--function.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    if not conn:
        conn = get_conn()  # pylint: disable=E0602

    node = conn.server_by_name(name)
    profiles = get_configured_provider()['profiles']  # pylint: disable=E0602
    if node is None:
        log.error('Unable to find the VM %s', name)
        # BUG FIX: the original fell through after logging and then
        # dereferenced ``node.extra`` on None, raising AttributeError.
        # Bail out explicitly instead.
        return False

    # Resolve the cloud profile recorded in the instance metadata, if any,
    # so profile-level options (e.g. flush_mine_on_destroy) can be honored.
    profile = None
    if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
        profile = node.extra['metadata']['profile']

    flush_mine_on_destroy = False
    if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
        flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']

    if flush_mine_on_destroy:
        log.info('Clearing Salt Mine: %s', name)
        salt_client = salt.client.get_local_client(__opts__['conf_file'])
        minions = salt_client.cmd(name, 'mine.flush')

    log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy)
    log.info('Destroying VM: %s', name)
    ret = conn.delete(node.id)
    if ret:
        log.info('Destroyed VM: %s', name)
        # Fire destroy action
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        if __opts__.get('delete_sshkeys', False) is True:
            salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0])
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
        __utils__['cloud.cachedir_index_del'](name)
        return True

    log.error('Failed to Destroy VM: %s', name)
    return False
Delete a single VM
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L514-L575
[ "def get_local_client(\n c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),\n mopts=None,\n skip_perm_errors=False,\n io_loop=None,\n auto_reconnect=False):\n '''\n .. versionadded:: 2014.7.0\n\n Read in the config and return the correct LocalClient object based on\n the configured transport\n\n :param IOLoop io_loop: io_loop used for events.\n Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n if mopts:\n opts = mopts\n else:\n # Late import to prevent circular import\n import salt.config\n opts = salt.config.client_config(c_path)\n\n # TODO: AIO core is separate from transport\n return LocalClient(\n mopts=opts,\n skip_perm_errors=skip_perm_errors,\n io_loop=io_loop,\n auto_reconnect=auto_reconnect)\n", "def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('user', 'tenant', 'identity_url', 'compute_region',)\n )\n", "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def remove_sshkey(host, known_hosts=None):\n '''\n Remove a host from the known_hosts file\n '''\n if known_hosts is None:\n if 'HOME' in 
os.environ:\n known_hosts = '{0}/.ssh/known_hosts'.format(os.environ['HOME'])\n else:\n try:\n known_hosts = '{0}/.ssh/known_hosts'.format(\n pwd.getpwuid(os.getuid()).pwd_dir\n )\n except Exception:\n pass\n\n if known_hosts is not None:\n log.debug(\n 'Removing ssh key for %s from known hosts file %s',\n host, known_hosts\n )\n else:\n log.debug('Removing ssh key for %s from known hosts file', host)\n\n cmd = 'ssh-keygen -R {0}'.format(host)\n subprocess.call(cmd, shell=True)\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. 
raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with 
salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. 
' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, []): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the 
server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
request_instance
python
def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if 
os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_
Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. Returns data about the instance
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L578-L791
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the 
file descriptor on systems with fcntl\n # unix and unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition 
has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n", "def get_image(conn, vm_):\n '''\n Return the image object to use\n '''\n vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode(\n 'ascii', 'salt-cloud-force-ascii'\n )\n if not vm_image:\n log.debug('No image set, must be boot from volume')\n return None\n\n image_list = conn.image_list()\n\n for img in image_list:\n if vm_image in (image_list[img]['id'], img):\n return image_list[img]['id']\n\n try:\n image = conn.image_show(vm_image)\n return image['id']\n except novaclient.exceptions.NotFound as exc:\n raise SaltCloudNotFound(\n 'The specified image, \\'{0}\\', could not be found: {1}'.format(\n vm_image,\n exc\n )\n )\n", "def get_size(conn, vm_):\n '''\n Return the VM's size object\n '''\n sizes = conn.list_sizes()\n vm_size = config.get_cloud_config_value('size', vm_, __opts__)\n if not vm_size:\n return sizes[0]\n\n for size in sizes:\n if vm_size and 
six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)):\n return sizes[size]['id']\n raise SaltCloudNotFound(\n 'The specified size, \\'{0}\\', could not be found.'.format(vm_size)\n )\n", "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def userdata_template(opts, vm_, userdata):\n '''\n Use the configured templating engine to template the userdata file\n '''\n # No userdata, no need to template anything\n if userdata is None:\n return userdata\n\n userdata_template = salt.config.get_cloud_config_value(\n 'userdata_template', vm_, opts, search_global=False, default=None\n )\n if userdata_template is False:\n return userdata\n # Use the cloud profile's userdata_template, otherwise get it from the\n # master configuration file.\n renderer = opts.get('userdata_template') \\\n if userdata_template is None \\\n else userdata_template\n if renderer is None:\n return userdata\n else:\n render_opts = opts.copy()\n render_opts.update(vm_)\n rend = salt.loader.render(render_opts, {})\n blacklist = opts['renderer_blacklist']\n whitelist = opts['renderer_whitelist']\n templated = salt.template.compile_template(\n ':string:',\n rend,\n renderer,\n blacklist,\n whitelist,\n input_data=userdata,\n )\n if not isinstance(templated, 
six.string_types):\n # template renderers like \"jinja\" should return a StringIO\n try:\n templated = ''.join(templated.readlines())\n except AttributeError:\n log.warning(\n 'Templated userdata resulted in non-string result (%s), '\n 'converting to string', templated\n )\n templated = six.text_type(templated)\n\n return templated\n", "def check_name(name, safe_chars):\n '''\n Check whether the specified name contains invalid characters\n '''\n regexp = re.compile('[^{0}]'.format(safe_chars))\n if regexp.search(name):\n raise SaltCloudException(\n '{0} contains characters not supported by this cloud provider. '\n 'Valid characters are: {1}'.format(\n name, safe_chars\n )\n )\n", "def get_block_mapping_opts(vm_):\n ret = {}\n ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={})\n ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[])\n ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[])\n ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None)\n ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None)\n ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, 
exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, []): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. 
If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
create
python
def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. 
if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', 
event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret
Create a single VM from a data dict
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L934-L1077
[ "def destroy(name, conn=None, call=None):\n '''\n Delete a single VM\n '''\n if call == 'function':\n raise SaltCloudSystemExit(\n 'The destroy action must be called with -d, --destroy, '\n '-a or --action.'\n )\n\n __utils__['cloud.fire_event'](\n 'event',\n 'destroying instance',\n 'salt/cloud/{0}/destroying'.format(name),\n args={'name': name},\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n\n if not conn:\n conn = get_conn() # pylint: disable=E0602\n\n node = conn.server_by_name(name)\n profiles = get_configured_provider()['profiles'] # pylint: disable=E0602\n if node is None:\n log.error('Unable to find the VM %s', name)\n profile = None\n if 'metadata' in node.extra and 'profile' in node.extra['metadata']:\n profile = node.extra['metadata']['profile']\n\n flush_mine_on_destroy = False\n if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:\n flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']\n\n if flush_mine_on_destroy:\n log.info('Clearing Salt Mine: %s', name)\n salt_client = salt.client.get_local_client(__opts__['conf_file'])\n minions = salt_client.cmd(name, 'mine.flush')\n\n log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy)\n log.info('Destroying VM: %s', name)\n ret = conn.delete(node.id)\n if ret:\n log.info('Destroyed VM: %s', name)\n # Fire destroy action\n __utils__['cloud.fire_event'](\n 'event',\n 'destroyed instance',\n 'salt/cloud/{0}/destroyed'.format(name),\n args={'name': name},\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n if __opts__.get('delete_sshkeys', False) is True:\n salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0])\n if __opts__.get('update_cachedir', False) is True:\n __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)\n __utils__['cloud.cachedir_index_del'](name)\n return True\n\n log.error('Failed to Destroy VM: %s', name)\n 
return False\n", "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n", "def gen_keys(keysize=2048):\n '''\n Generate Salt minion keys and return them as PEM file strings\n '''\n # Mandate that keys are at least 2048 in size\n if keysize < 2048:\n keysize = 2048\n tdir = tempfile.mkdtemp()\n\n salt.crypt.gen_keys(tdir, 'minion', keysize)\n priv_path = os.path.join(tdir, 'minion.pem')\n pub_path = os.path.join(tdir, 'minion.pub')\n with salt.utils.files.fopen(priv_path) as fp_:\n priv = salt.utils.stringutils.to_unicode(fp_.read())\n with salt.utils.files.fopen(pub_path) as fp_:\n pub = salt.utils.stringutils.to_unicode(fp_.read())\n shutil.rmtree(tdir)\n return priv, pub\n", "def ssh_interface(vm_):\n '''\n Return the ssh_interface type to connect to. 
Either 'public_ips' (default)\n or 'private_ips'.\n '''\n return config.get_cloud_config_value(\n 'ssh_interface', vm_, __opts__, default='public_ips',\n search_global=False\n )\n", "def is_profile_configured(opts, provider, profile_name, vm_=None):\n '''\n Check if the requested profile contains the minimum required parameters for\n a profile.\n\n Required parameters include image and provider for all drivers, while some\n drivers also require size keys.\n\n .. versionadded:: 2015.8.0\n '''\n # Standard dict keys required by all drivers.\n required_keys = ['provider']\n alias, driver = provider.split(':')\n\n # Most drivers need an image to be specified, but some do not.\n non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone', 'profitbricks']\n\n # Most drivers need a size, but some do not.\n non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',\n 'softlayer', 'softlayer_hw', 'vmware', 'vsphere',\n 'virtualbox', 'libvirt', 'oneandone', 'profitbricks']\n\n provider_key = opts['providers'][alias][driver]\n profile_key = opts['providers'][alias][driver]['profiles'][profile_name]\n\n # If cloning on Linode, size and image are not necessary.\n # They are obtained from the to-be-cloned VM.\n if driver == 'linode' and profile_key.get('clonefrom', False):\n non_image_drivers.append('linode')\n non_size_drivers.append('linode')\n elif driver == 'gce' and 'sourceImage' in six.text_type(vm_.get('ex_disks_gce_struct')):\n non_image_drivers.append('gce')\n\n # If cloning on VMware, specifying image is not necessary.\n if driver == 'vmware' and 'image' not in list(profile_key.keys()):\n non_image_drivers.append('vmware')\n\n if driver not in non_image_drivers:\n required_keys.append('image')\n if driver == 'vmware':\n required_keys.append('datastore')\n elif driver in ['linode', 'virtualbox']:\n required_keys.append('clonefrom')\n elif driver == 'nova':\n nova_image_keys = ['image', 'block_device_mapping', 'block_device', 
'boot_volume']\n if not any([key in provider_key for key in nova_image_keys]) and not any([key in profile_key for key in nova_image_keys]):\n required_keys.extend(nova_image_keys)\n\n if driver not in non_size_drivers:\n required_keys.append('size')\n\n # Check if required fields are supplied in the provider config. If they\n # are present, remove it from the required_keys list.\n for item in list(required_keys):\n if item in provider_key:\n required_keys.remove(item)\n\n # If a vm_ dict was passed in, use that information to get any other configs\n # that we might have missed thus far, such as a option provided in a map file.\n if vm_:\n for item in list(required_keys):\n if item in vm_:\n required_keys.remove(item)\n\n # Check for remaining required parameters in the profile config.\n for item in required_keys:\n if profile_key.get(item, None) is None:\n # There's at least one required configuration item which is not set.\n log.error(\n \"The required '%s' configuration setting is missing from \"\n \"the '%s' profile, which is configured under the '%s' alias.\",\n item, profile_name, alias\n )\n return False\n\n return True\n", "def wait_for_ip(update_callback,\n update_args=None,\n update_kwargs=None,\n timeout=5 * 60,\n interval=5,\n interval_multiplier=1,\n max_failures=10):\n '''\n Helper function that waits for an IP address for a specific maximum amount\n of time.\n\n :param update_callback: callback function which queries the cloud provider\n for the VM ip address. 
It must return None if the\n required data, IP included, is not available yet.\n :param update_args: Arguments to pass to update_callback\n :param update_kwargs: Keyword arguments to pass to update_callback\n :param timeout: The maximum amount of time(in seconds) to wait for the IP\n address.\n :param interval: The looping interval, i.e., the amount of time to sleep\n before the next iteration.\n :param interval_multiplier: Increase the interval by this multiplier after\n each request; helps with throttling\n :param max_failures: If update_callback returns ``False`` it's considered\n query failure. This value is the amount of failures\n accepted before giving up.\n :returns: The update_callback returned data\n :raises: SaltCloudExecutionTimeout\n\n '''\n if update_args is None:\n update_args = ()\n if update_kwargs is None:\n update_kwargs = {}\n\n duration = timeout\n while True:\n log.debug(\n 'Waiting for VM IP. Giving up in 00:%02d:%02d.',\n int(timeout // 60), int(timeout % 60)\n )\n data = update_callback(*update_args, **update_kwargs)\n if data is False:\n log.debug(\n '\\'update_callback\\' has returned \\'False\\', which is '\n 'considered a failure. 
Remaining Failures: %s.', max_failures\n )\n max_failures -= 1\n if max_failures <= 0:\n raise SaltCloudExecutionFailure(\n 'Too many failures occurred while waiting for '\n 'the IP address.'\n )\n elif data is not None:\n return data\n\n if timeout < 0:\n raise SaltCloudExecutionTimeout(\n 'Unable to get IP for 00:{0:02d}:{1:02d}.'.format(\n int(duration // 60),\n int(duration % 60)\n )\n )\n time.sleep(interval)\n timeout -= interval\n\n if interval_multiplier > 1:\n interval *= interval_multiplier\n if interval > timeout:\n interval = timeout + 1\n log.info('Interval multiplier in effect; interval is '\n 'now %ss.', interval)\n", "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def request_instance(vm_=None, call=None):\n '''\n Put together all of the information necessary to request an instance\n through Novaclient and then fire off the request the instance.\n\n Returns data about the instance\n '''\n if call == 'function':\n # Technically this function may be called other ways too, but it\n # definitely cannot be called with --function.\n raise SaltCloudSystemExit(\n 'The request_instance action must be called with -a or --action.'\n )\n log.info('Creating Cloud VM %s', vm_['name'])\n salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')\n conn 
= get_conn()\n kwargs = vm_.copy()\n\n try:\n kwargs['image_id'] = get_image(conn, vm_)\n except Exception as exc:\n raise SaltCloudSystemExit(\n 'Error creating {0} on OPENSTACK\\n\\n'\n 'Could not find image {1}: {2}\\n'.format(\n vm_['name'], vm_['image'], exc\n )\n )\n\n try:\n kwargs['flavor_id'] = get_size(conn, vm_)\n except Exception as exc:\n raise SaltCloudSystemExit(\n 'Error creating {0} on OPENSTACK\\n\\n'\n 'Could not find size {1}: {2}\\n'.format(\n vm_['name'], vm_['size'], exc\n )\n )\n\n kwargs['key_name'] = config.get_cloud_config_value(\n 'ssh_key_name', vm_, __opts__, search_global=False\n )\n\n security_groups = config.get_cloud_config_value(\n 'security_groups', vm_, __opts__, search_global=False\n )\n if security_groups is not None:\n vm_groups = security_groups\n avail_groups = conn.secgroup_list()\n group_list = []\n\n for vmg in vm_groups:\n if vmg in [name for name, details in six.iteritems(avail_groups)]:\n group_list.append(vmg)\n else:\n raise SaltCloudNotFound(\n 'No such security group: \\'{0}\\''.format(vmg)\n )\n\n kwargs['security_groups'] = group_list\n\n avz = config.get_cloud_config_value(\n 'availability_zone', vm_, __opts__, default=None, search_global=False\n )\n if avz is not None:\n kwargs['availability_zone'] = avz\n\n kwargs['nics'] = config.get_cloud_config_value(\n 'networks', vm_, __opts__, search_global=False, default=None\n )\n\n files = config.get_cloud_config_value(\n 'files', vm_, __opts__, search_global=False\n )\n if files:\n kwargs['files'] = {}\n for src_path in files:\n if os.path.exists(files[src_path]):\n with salt.utils.files.fopen(files[src_path], 'r') as fp_:\n kwargs['files'][src_path] = fp_.read()\n else:\n kwargs['files'][src_path] = files[src_path]\n\n userdata_file = config.get_cloud_config_value(\n 'userdata_file', vm_, __opts__, search_global=False, default=None\n )\n if userdata_file is not None:\n try:\n with salt.utils.files.fopen(userdata_file, 'r') as fp_:\n kwargs['userdata'] = 
salt.utils.cloud.userdata_template(\n __opts__, vm_, fp_.read()\n )\n except Exception as exc:\n log.exception(\n 'Failed to read userdata from %s: %s', userdata_file, exc)\n\n kwargs['config_drive'] = config.get_cloud_config_value(\n 'config_drive', vm_, __opts__, search_global=False\n )\n\n kwargs.update(get_block_mapping_opts(vm_))\n\n event_kwargs = {\n 'name': kwargs['name'],\n 'image': kwargs.get('image_id', 'Boot From Volume'),\n 'size': kwargs['flavor_id'],\n }\n\n __utils__['cloud.fire_event'](\n 'event',\n 'requesting instance',\n 'salt/cloud/{0}/requesting'.format(vm_['name']),\n args={\n 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)),\n },\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n\n try:\n data = conn.boot(**kwargs)\n except Exception as exc:\n raise SaltCloudSystemExit(\n 'Error creating {0} on Nova\\n\\n'\n 'The following exception was thrown by libcloud when trying to '\n 'run the initial deployment: {1}\\n'.format(\n vm_['name'], exc\n )\n )\n if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None:\n raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.')\n\n floating_ip_conf = config.get_cloud_config_value('floating_ip',\n vm_,\n __opts__,\n search_global=False,\n default={})\n if floating_ip_conf.get('auto_assign', False):\n floating_ip = None\n if floating_ip_conf.get('ip_address', None) is not None:\n ip_address = floating_ip_conf.get('ip_address', None)\n try:\n fl_ip_dict = conn.floating_ip_show(ip_address)\n floating_ip = fl_ip_dict['ip']\n except Exception as err:\n raise SaltCloudSystemExit(\n 'Error assigning floating_ip for {0} on Nova\\n\\n'\n 'The following exception was thrown by libcloud when trying to '\n 'assign a floating ip: {1}\\n'.format(\n vm_['name'], err\n )\n )\n\n else:\n pool = floating_ip_conf.get('pool', 'public')\n try:\n floating_ip = conn.floating_ip_create(pool)['ip']\n except Exception:\n log.info('A new IP address was unable to be allocated. '\n 'An IP address will be pulled from the already allocated list, '\n 'This will cause a race condition when building in parallel.')\n for fl_ip, opts in six.iteritems(conn.floating_ip_list()):\n if opts['fixed_ip'] is None and opts['pool'] == pool:\n floating_ip = fl_ip\n break\n if floating_ip is None:\n log.error('No IP addresses available to allocate for this server: %s', vm_['name'])\n\n def __query_node_data(vm_):\n try:\n node = show_instance(vm_['name'], 'action')\n log.debug('Loaded node data for %s:\\n%s', vm_['name'], pprint.pformat(node))\n except Exception as err:\n log.error(\n 'Failed to get nodes list: %s', err,\n # Show the traceback if the debug logging level is enabled\n exc_info_on_loglevel=logging.DEBUG\n )\n # Trigger a failure in the wait for IP function\n return False\n return node['state'] == 'ACTIVE' or None\n\n # if we associate the floating ip here,then we will fail.\n # As if we attempt to associate a floating IP before the Nova instance has completed building,\n # it will fail.So we should associate it after the Nova instance has completed building.\n try:\n salt.utils.cloud.wait_for_ip(\n 
__query_node_data,\n update_args=(vm_,)\n )\n except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:\n try:\n # It might be already up, let's destroy it!\n destroy(vm_['name'])\n except SaltCloudSystemExit:\n pass\n finally:\n raise SaltCloudSystemExit(six.text_type(exc))\n\n try:\n conn.floating_ip_associate(vm_['name'], floating_ip)\n vm_['floating_ip'] = floating_ip\n except Exception as exc:\n raise SaltCloudSystemExit(\n 'Error assigning floating_ip for {0} on Nova\\n\\n'\n 'The following exception was thrown by libcloud when trying to '\n 'assign a floating ip: {1}\\n'.format(\n vm_['name'], exc\n )\n )\n\n if not vm_.get('password', None):\n vm_['password'] = data.extra.get('password', '')\n\n return data, vm_\n", "def get_salt_interface(vm_, opts):\n '''\n Return the salt_interface type to connect to. Either 'public_ips' (default)\n or 'private_ips'.\n '''\n salt_host = salt.config.get_cloud_config_value(\n 'salt_interface', vm_, opts, default=False,\n search_global=False\n )\n\n if salt_host is False:\n salt_host = salt.config.get_cloud_config_value(\n 'ssh_interface', vm_, opts, default='public_ips',\n search_global=False\n )\n\n return salt_host\n", "def preferred_ip(vm_, ips):\n '''\n Return the preferred Internet protocol. 
Either 'ipv4' (default) or 'ipv6'.\n '''\n proto = config.get_cloud_config_value(\n 'protocol', vm_, __opts__, default='ipv4', search_global=False\n )\n\n family = socket.AF_INET\n if proto == 'ipv6':\n family = socket.AF_INET6\n for ip in ips:\n try:\n socket.inet_pton(family, ip)\n return ip\n except Exception:\n continue\n\n return False\n", "def secure_password(length=20, use_random=True):\n '''\n Generate a secure password.\n '''\n try:\n length = int(length)\n pw = ''\n while len(pw) < length:\n if HAS_RANDOM and use_random:\n while True:\n try:\n char = salt.utils.stringutils.to_str(get_random_bytes(1))\n break\n except UnicodeDecodeError:\n continue\n pw += re.sub(\n salt.utils.stringutils.to_str(r'\\W'),\n str(), # future lint: disable=blacklisted-function\n char\n )\n else:\n pw += random.SystemRandom().choice(string.ascii_letters + string.digits)\n return pw\n except Exception as exc:\n log.exception('Failed to generate secure passsword')\n raise CommandExecutionError(six.text_type(exc))\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' 
) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
list_nodes
python
def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret
Return a list of the VMs that in this location
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L1096-L1147
[ "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def is_public_ip(ip):\n '''\n Determines whether an IP address falls within one of the private IP ranges\n '''\n if ':' in ip:\n # ipv6\n if ip.startswith('fe80:'):\n # ipv6 link local\n return False\n return True\n addr = ip_to_int(ip)\n if 167772160 < addr < 184549375:\n # 10.0.0.0/8\n return False\n elif 3232235520 < addr < 3232301055:\n # 192.168.0.0/16\n return False\n elif 2886729728 < addr < 2887778303:\n # 172.16.0.0/12\n return False\n elif 2130706432 < addr < 2147483647:\n # 127.0.0.0/8\n return False\n return True\n", "def server_list(self):\n '''\n List servers\n '''\n nt_ks = self.compute_conn\n ret = {}\n for item in nt_ks.servers.list():\n try:\n ret[item.name] = {\n 'id': item.id,\n 'name': item.name,\n 'state': item.status,\n 'accessIPv4': item.accessIPv4,\n 'accessIPv6': item.accessIPv6,\n 'flavor': {'id': item.flavor['id'],\n 'links': item.flavor['links']},\n 'image': {'id': item.image['id'] if item.image else 'Boot From Volume',\n 'links': item.image['links'] if item.image else ''},\n }\n except TypeError:\n pass\n return ret\n", "def server_show(self, server_id):\n '''\n Show details of one server\n '''\n ret = {}\n try:\n servers = self.server_list_detailed()\n except 
AttributeError:\n raise SaltCloudSystemExit('Corrupt server in server_list_detailed. Remove corrupt servers.')\n for server_name, server in six.iteritems(servers):\n if six.text_type(server['id']) == server_id:\n ret[server_name] = server\n return ret\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
list_nodes_full
python
def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret
Return a list of the VMs that in this location
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L1150-L1177
[ "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def server_show_libcloud(self, uuid):\n '''\n Make output look like libcloud output for consistency\n '''\n server_info = self.server_show(uuid)\n server = next(six.itervalues(server_info))\n server_name = next(six.iterkeys(server_info))\n if not hasattr(self, 'password'):\n self.password = None\n ret = NovaServer(server_name, server, self.password)\n\n return ret\n", "def server_list(self):\n '''\n List servers\n '''\n nt_ks = self.compute_conn\n ret = {}\n for item in nt_ks.servers.list():\n try:\n ret[item.name] = {\n 'id': item.id,\n 'name': item.name,\n 'state': item.status,\n 'accessIPv4': item.accessIPv4,\n 'accessIPv6': item.accessIPv6,\n 'flavor': {'id': item.flavor['id'],\n 'links': item.flavor['links']},\n 'image': {'id': item.image['id'] if item.image else 'Boot From Volume',\n 'links': item.image['links'] if item.image else ''},\n }\n except TypeError:\n pass\n return ret\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' ) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and 
attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' ) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' 
) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
list_nodes_min
python
def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' ) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list
Return a list of the VMs that in this location
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L1180-L1197
[ "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def server_list_min(self):\n '''\n List minimal information about servers\n '''\n nt_ks = self.compute_conn\n ret = {}\n for item in nt_ks.servers.list(detailed=False):\n try:\n ret[item.name] = {\n 'id': item.id,\n 'state': 'Running'\n }\n except TypeError:\n pass\n return ret\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach 
block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' ) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return 
conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' 
) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
volume_create
python
def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs)
Create block storage device
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L1209-L1219
[ "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def volume_create(self, name, size=100, snapshot=None, voltype=None,\n availability_zone=None):\n '''\n Create a block device\n '''\n if self.volume_conn is None:\n raise SaltCloudSystemExit('No cinder endpoint available')\n nt_ks = self.volume_conn\n response = nt_ks.volumes.create(\n size=size,\n display_name=name,\n volume_type=voltype,\n snapshot_id=snapshot,\n availability_zone=availability_zone\n )\n\n return self._volume_get(response.id)\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
volume_attach
python
def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 )
Attach block volume
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L1245-L1255
[ "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def volume_attach(self,\n name,\n server_name,\n device='/dev/xvdb',\n timeout=300):\n '''\n Attach a block device\n '''\n try:\n volume = self.volume_show(name)\n except KeyError as exc:\n raise SaltCloudSystemExit('Unable to find {0} volume: {1}'.format(name, exc))\n server = self.server_by_name(server_name)\n response = self.compute_conn.volumes.create_server_volume(\n server.id,\n volume['id'],\n device=device\n )\n trycount = 0\n start = time.time()\n while True:\n trycount += 1\n try:\n response = self._volume_get(volume['id'])\n if response['status'] == 'in-use':\n return response\n except Exception as exc:\n log.debug('Volume is attaching: %s', name)\n time.sleep(1)\n if time.time() - start > timeout:\n log.error('Timed out after %s seconds '\n 'while waiting for data', timeout)\n return False\n\n log.debug(\n 'Retrying volume_show() (try %s)', trycount\n )\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
volume_create_attach
python
def volume_create_attach(name, call=None, **kwargs):
    '''
    Create volumes and attach them to the named node.

    :param str name: Name of the server the volumes will be attached to.
    :param str call: Salt Cloud invocation type; this is an ``--action``
        and raises if invoked with ``-f``/``--function``.
    :param kwargs: Must contain ``volumes`` — either a list of volume
        definition dicts, or a YAML string that parses to such a list.
        Each entry carries ``name`` plus one of ``volume_id``,
        ``snapshot`` or ``size``, and optional ``type``, ``iops`` and
        ``device`` keys.
    :return: A list of human-readable messages, one per attached volume.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The create_attach_volumes action must be called with '
            '-a or --action.'
        )

    # ``volumes`` may arrive as a YAML string from the CLI; normalize it
    # to a list of dicts. isinstance with six.string_types handles str
    # subclasses and py2 unicode, unlike the old ``type(...) is str``
    # identity check.
    if isinstance(kwargs['volumes'], six.string_types):
        volumes = salt.utils.yaml.safe_load(kwargs['volumes'])
    else:
        volumes = kwargs['volumes']

    ret = []
    for volume in volumes:
        volume_dict = {
            'name': volume['name'],
        }
        if 'volume_id' in volume:
            volume_dict['volume_id'] = volume['volume_id']
        elif 'snapshot' in volume:
            volume_dict['snapshot'] = volume['snapshot']
        else:
            volume_dict['size'] = volume['size']
        if 'type' in volume:
            volume_dict['type'] = volume['type']
        if 'iops' in volume:
            volume_dict['iops'] = volume['iops']

        # NOTE(review): volume_dict never contains an 'id' key here (only
        # 'volume_id'/'snapshot'/'size' are ever set above), so a volume
        # is always created — even when an existing 'volume_id' was
        # supplied. Preserved as-is for backward compatibility; confirm
        # whether creation should be skipped for pre-existing volumes.
        if 'id' not in volume_dict:
            created_volume = create_volume(**volume_dict)
            # create_volume returns the new volume's details, including
            # the 'id' used in the message below.
            volume_dict.update(created_volume)

        attach = attach_volume(
            name=volume['name'],
            server_name=name,
            device=volume.get('device', None),
            call='action'
        )
        if attach:
            msg = (
                '{0} attached to {1} (aka {2})'.format(
                    volume_dict['id'],
                    name,
                    volume_dict['name'],
                )
            )
            log.info(msg)
            ret.append(msg)
    return ret
Create volumes and attach them to the newly created node
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L1262-L1318
[ "def safe_load(stream, Loader=SaltYamlSafeLoader):\n '''\n .. versionadded:: 2018.3.0\n\n Helper function which automagically uses our custom loader.\n '''\n return yaml.load(stream, Loader=Loader)\n", "def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs):\n '''\n Create block storage device\n '''\n conn = get_conn()\n create_kwargs = {'name': name,\n 'size': size,\n 'snapshot': snapshot,\n 'voltype': voltype}\n create_kwargs['availability_zone'] = kwargs.get('availability_zone', None)\n return conn.volume_create(**create_kwargs)\n", "def volume_attach(name, server_name, device='/dev/xvdb', **kwargs):\n '''\n Attach block volume\n '''\n conn = get_conn()\n return conn.volume_attach(\n name,\n server_name,\n device,\n timeout=300\n )\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools 
.. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' 
) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
virtual_interface_create
python
def virtual_interface_create(name, net_name, **kwargs):
    '''
    Create a virtual interface on a server, attached to the named network.

    name
        The name of the server to attach the new interface to

    net_name
        The name of the (private) network to attach

    Returns the interface data reported by Nova for the new attachment.

    NOTE: the original docstring ("Create private networks") was copy-pasted
    from ``network_create`` and misdescribed this function.
    '''
    conn = get_conn()
    # Delegates to SaltNova.virtual_interface_create, which resolves the
    # server UUID from ``name`` and the network id from ``net_name``.
    return conn.virtual_interface_create(name, net_name)
Create a virtual interface on a server, attached to the named private network
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L1357-L1362
[ "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def virtual_interface_create(self, name, net_name):\n '''\n Add an interfaces to a slice\n '''\n nt_ks = self.compute_conn\n serverid = self._server_uuid_from_name(name)\n networkid = self.network_show(net_name).get('id', None)\n if networkid is None:\n return {net_name: False}\n nets = nt_ks.virtual_interfaces.create(networkid, serverid)\n return nets\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
floating_ip_create
python
def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool'])
Allocate a floating IP .. versionadded:: 2016.3.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L1395-L1411
[ "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def floating_ip_create(self, pool=None):\n '''\n Allocate a floating IP\n\n .. versionadded:: 2016.3.0\n '''\n nt_ks = self.compute_conn\n floating_ip = nt_ks.floating_ips.create(pool)\n response = {\n 'ip': floating_ip.ip,\n 'fixed_ip': floating_ip.fixed_ip,\n 'id': floating_ip.id,\n 'instance_id': floating_ip.instance_id,\n 'pool': floating_ip.pool\n }\n return response\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, exc ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. 
Compatible with IPv4. ''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def rackconnectv3(vm_): ''' Determine if server is using rackconnectv3 or not Return the rackconnect network name or False ''' return config.get_cloud_config_value( 'rackconnectv3', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default=False, search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
__utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', event_data, list(event_data)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver']) return ret def avail_images(): ''' Return a dict of all available VM images on the cloud provider. ''' conn = get_conn() return conn.image_list() def avail_sizes(): ''' Return a dict of all available VM sizes on the cloud provider. ''' conn = get_conn() return conn.flavor_list() def list_nodes(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: server_tmp = conn.server_show(server_list[server]['id']).get(server) # If the server is deleted while looking it up, skip if server_tmp is None: continue private = [] public = [] if 'addresses' not in server_tmp: server_tmp['addresses'] = {} for network in server_tmp['addresses']: for address in server_tmp['addresses'][network]: if salt.utils.cloud.is_public_ip(address.get('addr', '')): public.append(address['addr']) elif ':' in address['addr']: public.append(address['addr']) elif '.' 
in address['addr']: private.append(address['addr']) if server_tmp['accessIPv4']: if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']): public.append(server_tmp['accessIPv4']) else: private.append(server_tmp['accessIPv4']) if server_tmp['accessIPv6']: public.append(server_tmp['accessIPv6']) ret[server] = { 'id': server_tmp['id'], 'image': server_tmp['image']['id'], 'size': server_tmp['flavor']['id'], 'state': server_tmp['state'], 'private_ips': private, 'public_ips': public, } return ret def list_nodes_full(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_full function must be called with' ' -f or --function.' ) ) ret = {} conn = get_conn() server_list = conn.server_list() if not server_list: return {} for server in server_list: try: ret[server] = conn.server_show_libcloud( server_list[server]['id'] ).__dict__ except IndexError as exc: ret = {} __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret def list_nodes_min(call=None, **kwargs): ''' Return a list of the VMs that in this location ''' if call == 'action': raise SaltCloudSystemExit( ( 'The list_nodes_min function must be called with' ' -f or --function.' 
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_associate(name, kwargs, call=None): ''' Associate a floating IP address to a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_associate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_associate(name, kwargs['floating_ip']) return list_nodes()[name] def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/nova.py
floating_ip_associate
python
def floating_ip_associate(name, kwargs, call=None):
    '''
    Associate a floating IP address to a server

    .. versionadded:: 2016.3.0

    :param str name: Name of the server that should receive the floating IP
    :param dict kwargs: Must contain the key ``floating_ip`` with the address
        to associate
    :param str call: Salt Cloud invocation type; must be ``action``
        (invoked with ``-a`` / ``--action``)
    :return: The node data for ``name`` after the association, or ``False``
        when ``floating_ip`` is missing from ``kwargs``
    :raises SaltCloudSystemExit: if not invoked as an action
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            # Fixed typo in the usage hint: was "-a of --action"
            'The floating_ip_associate action must be called with -a or --action.'
        )

    if 'floating_ip' not in kwargs:
        log.error('floating_ip is required')
        return False

    conn = get_conn()
    conn.floating_ip_associate(name, kwargs['floating_ip'])
    # Return the refreshed node view so the caller sees the new address
    return list_nodes()[name]
Associate a floating IP address to a server .. versionadded:: 2016.3.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L1433-L1450
[ "def list_nodes(call=None, **kwargs):\n '''\n Return a list of the VMs that in this location\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The list_nodes function must be called with -f or --function.'\n )\n\n ret = {}\n conn = get_conn()\n server_list = conn.server_list()\n\n if not server_list:\n return {}\n for server in server_list:\n server_tmp = conn.server_show(server_list[server]['id']).get(server)\n\n # If the server is deleted while looking it up, skip\n if server_tmp is None:\n continue\n\n private = []\n public = []\n if 'addresses' not in server_tmp:\n server_tmp['addresses'] = {}\n for network in server_tmp['addresses']:\n for address in server_tmp['addresses'][network]:\n if salt.utils.cloud.is_public_ip(address.get('addr', '')):\n public.append(address['addr'])\n elif ':' in address['addr']:\n public.append(address['addr'])\n elif '.' in address['addr']:\n private.append(address['addr'])\n\n if server_tmp['accessIPv4']:\n if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']):\n public.append(server_tmp['accessIPv4'])\n else:\n private.append(server_tmp['accessIPv4'])\n if server_tmp['accessIPv6']:\n public.append(server_tmp['accessIPv6'])\n\n ret[server] = {\n 'id': server_tmp['id'],\n 'image': server_tmp['image']['id'],\n 'size': server_tmp['flavor']['id'],\n 'state': server_tmp['state'],\n 'private_ips': private,\n 'public_ips': public,\n }\n return ret\n", "def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n\n kwargs = vm_.copy() # pylint: disable=E1103\n\n kwargs['username'] = vm_['user']\n kwargs['project_id'] = vm_['tenant']\n kwargs['auth_url'] = vm_['identity_url']\n kwargs['region_name'] = vm_['compute_region']\n kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)\n\n if 'password' in vm_:\n kwargs['password'] = vm_['password']\n\n if 'verify' in vm_ and vm_['use_keystoneauth'] is True:\n kwargs['verify'] = vm_['verify']\n elif 'verify' in vm_ and 
vm_['use_keystoneauth'] is False:\n log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')\n conn = nova.SaltNova(**kwargs)\n\n return conn\n", "def floating_ip_associate(self, server_name, floating_ip):\n '''\n Associate floating IP address to server\n\n .. versionadded:: 2016.3.0\n '''\n nt_ks = self.compute_conn\n server_ = self.server_by_name(server_name)\n server = nt_ks.servers.get(server_.__dict__['id'])\n server.add_floating_ip(floating_ip)\n return self.floating_ip_list()[floating_ip]\n" ]
# -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth` option in the provider config. .. note:: this is required to use keystone v3 as for authentication. .. 
code-block:: yaml my-openstack-config: use_keystoneauth: True identity_url: 'https://controller:5000/v3' auth_version: 3 compute_name: nova compute_region: RegionOne service_type: compute verify: '/path/to/custom/certs/ca-bundle.crt' tenant: admin user: admin password: passwordgoeshere driver: nova Note: by default the nova driver will attempt to verify its connection utilizing the system certificates. If you need to verify against another bundle of CA certificates or want to skip verification altogether you will need to specify the verify option. You can specify True or False to verify (or not) against system certificates, a path to a bundle or CA certs to check against, or None to allow keystoneauth to search for the certificates on its own.(defaults to True) For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: mypassword tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 boot_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ssh_interface: floating_ips floating_ip: auto_assign: True pool: public Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. 
''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import logging import socket import pprint # Import Salt Libs from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.utils.yaml import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.config as config from salt.utils.functools import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False __utils__['versions.warn_until']( 'Neon', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the openstack driver instead.' ) return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. ''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False) if 'password' in vm_: kwargs['password'] = vm_['password'] if 'verify' in vm_ and vm_['use_keystoneauth'] is True: kwargs['verify'] = vm_['verify'] elif 'verify' in vm_ and vm_['use_keystoneauth'] is False: log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present') conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except 
    novaclient.exceptions.NotFound as exc:
        # NOTE(review): tail of ``get_image`` -- the ``except`` keyword for
        # this clause sits on the previous (mangled) source line. Reached
        # when neither image_list() nor image_show() resolves the name.
        raise SaltCloudNotFound(
            'The specified image, \'{0}\', could not be found: {1}'.format(
                vm_image, exc
            )
        )


def get_block_mapping_opts(vm_):
    '''
    Collect the block-storage related profile options into a single dict.

    Each value is resolved through ``config.get_cloud_config_value`` so it
    may come from the profile, provider, or master configuration.

    :param dict vm_: The VM/profile configuration dictionary.
    :return: dict with keys ``block_device_mapping``, ``block_device``,
        ``ephemeral``, ``swap``, ``snapshot`` and ``boot_volume``.
    '''
    ret = {}
    ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={})
    ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[])
    ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[])
    ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None)
    ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None)
    ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None)
    return ret


def show_instance(name, call=None):
    '''
    Show the details from the provider concerning an instance
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    conn = get_conn()
    node = conn.show_instance(name).__dict__
    # Refresh the on-disk node cache with the freshly fetched data.
    __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
    return node


def get_size(conn, vm_):
    '''
    Return the VM's size object
    '''
    sizes = conn.list_sizes()
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    if not vm_size:
        # No size configured: fall back to the first flavor returned.
        return sizes[0]

    for size in sizes:
        # Accept a match on either the flavor id or the flavor name.
        if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)):
            return sizes[size]['id']
    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(vm_size)
    )


def preferred_ip(vm_, ips):
    '''
    Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'.

    Returns the first address in ``ips`` that parses as the configured
    family, or ``False`` if none does.
    '''
    proto = config.get_cloud_config_value(
        'protocol', vm_, __opts__, default='ipv4', search_global=False
    )

    family = socket.AF_INET
    if proto == 'ipv6':
        family = socket.AF_INET6
    for ip in ips:
        try:
            # inet_pton raises if ``ip`` is not valid for ``family``; the
            # first address of the requested family wins.
            socket.inet_pton(family, ip)
            return ip
        except Exception:
            continue

    return False


def ignore_cidr(vm_, ip):
    '''
    Return True if we are to ignore the specified IP. Compatible with IPv4.
    '''
    if HAS_NETADDR is False:
        log.error('Error: netaddr is not installed')
        # NOTE(review): returns a non-empty (truthy) string rather than a
        # bool here, so callers using this in a boolean context will treat
        # the missing dependency as "ignore this IP" -- confirm intended.
        return 'Error: netaddr is not installed'

    cidr = config.get_cloud_config_value(
        'ignore_cidr', vm_, __opts__, default='', search_global=False
    )
    if cidr != '' and all_matching_cidrs(ip, [cidr]):
        log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr)
        return True

    return False


def ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips'
    (default) or 'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )


def rackconnect(vm_):
    '''
    Determine if we should wait for rackconnect automation before running.
    Either 'False' (default) or 'True'.
    '''
    return config.get_cloud_config_value(
        'rackconnect', vm_, __opts__, default=False,
        search_global=False
    )


def rackconnectv3(vm_):
    '''
    Determine if server is using rackconnectv3 or not.
    Return the rackconnect network name or False.
    '''
    return config.get_cloud_config_value(
        'rackconnectv3', vm_, __opts__, default=False,
        search_global=False
    )


def cloudnetwork(vm_):
    '''
    Determine if we should use an extra network to bootstrap.
    Either 'False' (default) or 'True'.
    '''
    return config.get_cloud_config_value(
        'cloudnetwork', vm_, __opts__, default=False,
        search_global=False
    )


def managedcloud(vm_):
    '''
    Determine if we should wait for the managed cloud automation before
    running. Either 'False' (default) or 'True'.
    '''
    return config.get_cloud_config_value(
        'managedcloud', vm_, __opts__, default=False,
        search_global=False
    )


def destroy(name, conn=None, call=None):
    '''
    Delete a single VM
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) salt_client = salt.client.get_local_client(__opts__['conf_file']) minions = salt_client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.delete(node.id) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('delete_sshkeys', False) is True: salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) __utils__['cloud.cachedir_index_del'](name) return True log.error('Failed to Destroy VM: %s', name) return False def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance through Novaclient and then fire off the request the instance. 
Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) log.info('Creating Cloud VM %s', vm_['name']) salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = vm_.copy() try: kwargs['image_id'] = get_image(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ) ) try: kwargs['flavor_id'] = get_size(conn, vm_) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ) ) kwargs['key_name'] = config.get_cloud_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_cloud_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups avail_groups = conn.secgroup_list() group_list = [] for vmg in vm_groups: if vmg in [name for name, details in six.iteritems(avail_groups)]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['security_groups'] = group_list avz = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) if avz is not None: kwargs['availability_zone'] = avz kwargs['nics'] = config.get_cloud_config_value( 'networks', vm_, __opts__, search_global=False, default=None ) files = config.get_cloud_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['files'] = {} for src_path in files: if os.path.exists(files[src_path]): with salt.utils.files.fopen(files[src_path], 'r') as fp_: kwargs['files'][src_path] = fp_.read() else: kwargs['files'][src_path] = files[src_path] userdata_file = 
config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is not None: try: with salt.utils.files.fopen(userdata_file, 'r') as fp_: kwargs['userdata'] = salt.utils.cloud.userdata_template( __opts__, vm_, fp_.read() ) except Exception as exc: log.exception( 'Failed to read userdata from %s: %s', userdata_file, exc) kwargs['config_drive'] = config.get_cloud_config_value( 'config_drive', vm_, __opts__, search_global=False ) kwargs.update(get_block_mapping_opts(vm_)) event_kwargs = { 'name': kwargs['name'], 'image': kwargs.get('image_id', 'Boot From Volume'), 'size': kwargs['flavor_id'], } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = conn.boot(**kwargs) except Exception as exc: raise SaltCloudSystemExit( 'Error creating {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ) ) if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None: raise SaltCloudSystemExit('No password returned. 
Set ssh_key_file.') floating_ip_conf = config.get_cloud_config_value('floating_ip', vm_, __opts__, search_global=False, default={}) if floating_ip_conf.get('auto_assign', False): floating_ip = None if floating_ip_conf.get('ip_address', None) is not None: ip_address = floating_ip_conf.get('ip_address', None) try: fl_ip_dict = conn.floating_ip_show(ip_address) floating_ip = fl_ip_dict['ip'] except Exception as err: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], err ) ) else: pool = floating_ip_conf.get('pool', 'public') try: floating_ip = conn.floating_ip_create(pool)['ip'] except Exception: log.info('A new IP address was unable to be allocated. ' 'An IP address will be pulled from the already allocated list, ' 'This will cause a race condition when building in parallel.') for fl_ip, opts in six.iteritems(conn.floating_ip_list()): if opts['fixed_ip'] is None and opts['pool'] == pool: floating_ip = fl_ip break if floating_ip is None: log.error('No IP addresses available to allocate for this server: %s', vm_['name']) def __query_node_data(vm_): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: log.error( 'Failed to get nodes list: %s', err, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False return node['state'] == 'ACTIVE' or None # if we associate the floating ip here,then we will fail. # As if we attempt to associate a floating IP before the Nova instance has completed building, # it will fail.So we should associate it after the Nova instance has completed building. 
try: salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_,) ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) try: conn.floating_ip_associate(vm_['name'], floating_ip) vm_['floating_ip'] = floating_ip except Exception as exc: raise SaltCloudSystemExit( 'Error assigning floating_ip for {0} on Nova\n\n' 'The following exception was thrown by libcloud when trying to ' 'assign a floating ip: {1}\n'.format( vm_['name'], exc ) ) if not vm_.get('password', None): vm_['password'] = data.extra.get('password', '') return data, vm_ def _query_node_data(vm_, data, conn): try: node = show_instance(vm_['name'], 'action') log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node)) except Exception as err: # Show the traceback if the debug logging level is enabled log.error( 'Failed to get nodes list: %s', err, exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = node['state'] == 'ACTIVE' if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = node.get('extra', {}) rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = conn.server_show_libcloud(node['id']).extra mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return access_ip = node.get('extra', {}).get('access_ip', '') rcv3 = rackconnectv3(vm_) in node['addresses'] sshif = ssh_interface(vm_) in node['addresses'] if any((rcv3, sshif)): networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) for network in node['addresses'].get(networkname, 
[]): if network['version'] is 4: access_ip = network['addr'] break vm_['cloudnetwork'] = True # Conditions to pass this # # Rackconnect v2: vm_['rackconnect'] = True # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the # server. In this case we can use the private_ips for ssh_interface, or the access_ip. # # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork> # If this is the case, salt will need to use the cloud network to login to the server. There # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud # also cannot use the private_ips, because that traffic is dropped at the hypervisor. # # CloudNetwork: vm['cloudnetwork'] = True # If this is True, then we should have an access_ip at this point set to the ip on the cloud # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will # use the initial access_ip, and not overwrite anything. if (any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != ''): data.public_ips = [access_ip] return data result = [] if ('private_ips' not in node and 'public_ips' not in node and 'floating_ips' not in node and 'fixed_ips' not in node and 'access_ip' in node.get('extra', {})): result = [node['extra']['access_ip']] private = node.get('private_ips', []) public = node.get('public_ips', []) fixed = node.get('fixed_ips', []) floating = node.get('floating_ips', []) if private and not public: log.warning('Private IPs returned, but not public. ' 'Checking for misidentified IPs') for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) log.warning('Public IP address was not ready when we last checked. 
' 'Appending public IP address now.') public = data.public_ips else: log.warning('%s is a private IP', private_ip) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) # populate return data with private_ips # when ssh_interface is set to private_ips and public_ips exist if not result and ssh_interface(vm_) == 'private_ips': for private_ip in private: ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) non_private_ips = [] if public: data.public_ips = public if ssh_interface(vm_) == 'public_ips': non_private_ips.append(public) if floating: data.floating_ips = floating if ssh_interface(vm_) == 'floating_ips': non_private_ips.append(floating) if fixed: data.fixed_ips = fixed if ssh_interface(vm_) == 'fixed_ips': non_private_ips.append(fixed) if non_private_ips: log.debug('result = %s', non_private_ips) data.private_ips = result if ssh_interface(vm_) != 'private_ips': return data if result: log.debug('result = %s', result) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'nova', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass deploy = config.get_cloud_config_value('deploy', vm_, __opts__) key_filename = config.get_cloud_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file \'{0}\' does not exist'.format( key_filename ) ) vm_['key_filename'] = key_filename __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) conn = get_conn() if 'instance_id' in vm_: # This was probably created via another process, and doesn't have # things like salt keys created yet, so let's create them now. if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, __opts__ ) ) data = conn.server_show_libcloud(vm_['instance_id']) if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True: vm_['password'] = salt.utils.pycrypto.secure_password() conn.root_password(vm_['instance_id'], vm_['password']) else: # Put together all of the information required to request the instance, # and then fire off the request for it data, vm_ = request_instance(vm_) # Pull the instance ID, valid for both spot and normal instances vm_['instance_id'] = data.id try: data = salt.utils.cloud.wait_for_ip( _query_node_data, update_args=(vm_, data, conn), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', 
vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif ssh_interface(vm_) == 'fixed_ips': ip_address = preferred_ip(vm_, data.fixed_ips) elif ssh_interface(vm_) == 'floating_ips': ip_address = preferred_ip(vm_, data.floating_ips) else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips': salt_ip_address = preferred_ip(vm_, data.fixed_ips) log.info('Salt interface set to: %s', salt_ip_address) elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips': salt_ip_address = preferred_ip(vm_, data.floating_ips) log.info('Salt interface set to: %s', salt_ip_address) else: salt_ip_address = preferred_ip(vm_, data.public_ips) log.debug('Salt interface set to: %s', salt_ip_address) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') vm_['ssh_host'] = ip_address vm_['salt_host'] = salt_ip_address ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) if 'password' in ret['extra']: del ret['extra']['password'] log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data.__dict__) ) event_data = { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], 'instance_id': vm_['instance_id'], 'floating_ips': data.floating_ips, 'fixed_ips': data.fixed_ips, 'private_ips': data.private_ips, 'public_ips': data.public_ips } 
    # Tail of ``create``: announce the finished instance and index it in
    # the cloud cachedir before returning the bootstrap result.
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', event_data, list(event_data)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver'])

    return ret


def avail_images():
    '''
    Return a dict of all available VM images on the cloud provider.
    '''
    conn = get_conn()
    return conn.image_list()


def avail_sizes():
    '''
    Return a dict of all available VM sizes on the cloud provider.
    '''
    conn = get_conn()
    return conn.flavor_list()


def list_nodes(call=None, **kwargs):
    '''
    Return a list of the VMs that in this location
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    ret = {}
    conn = get_conn()
    server_list = conn.server_list()

    if not server_list:
        return {}
    for server in server_list:
        server_tmp = conn.server_show(server_list[server]['id']).get(server)

        # If the server is deleted while looking it up, skip
        if server_tmp is None:
            continue

        private = []
        public = []
        if 'addresses' not in server_tmp:
            server_tmp['addresses'] = {}
        for network in server_tmp['addresses']:
            for address in server_tmp['addresses'][network]:
                if salt.utils.cloud.is_public_ip(address.get('addr', '')):
                    public.append(address['addr'])
                elif ':' in address['addr']:
                    # Heuristic: a colon implies IPv6 -> treated as public.
                    public.append(address['addr'])
                elif '.' in address['addr']:
                    # Remaining dotted-quad addresses are assumed private.
                    private.append(address['addr'])

        if server_tmp['accessIPv4']:
            if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']):
                public.append(server_tmp['accessIPv4'])
            else:
                private.append(server_tmp['accessIPv4'])
        if server_tmp['accessIPv6']:
            public.append(server_tmp['accessIPv6'])

        ret[server] = {
            'id': server_tmp['id'],
            'image': server_tmp['image']['id'],
            'size': server_tmp['flavor']['id'],
            'state': server_tmp['state'],
            'private_ips': private,
            'public_ips': public,
        }
    return ret


def list_nodes_full(call=None, **kwargs):
    '''
    Return a list of the VMs that in this location
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            (
                'The list_nodes_full function must be called with'
                ' -f or --function.'
            )
        )

    ret = {}
    conn = get_conn()
    server_list = conn.server_list()

    if not server_list:
        return {}
    for server in server_list:
        try:
            ret[server] = conn.server_show_libcloud(
                server_list[server]['id']
            ).__dict__
        except IndexError as exc:
            # NOTE(review): one failed lookup discards ALL previously
            # collected entries, not just this server -- confirm intended.
            ret = {}

    __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
    return ret


def list_nodes_min(call=None, **kwargs):
    '''
    Return a list of the VMs that in this location
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            (
                'The list_nodes_min function must be called with'
                ' -f or --function.'
) ) conn = get_conn() server_list = conn.server_list_min() if not server_list: return {} return server_list def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs): ''' Create block storage device ''' conn = get_conn() create_kwargs = {'name': name, 'size': size, 'snapshot': snapshot, 'voltype': voltype} create_kwargs['availability_zone'] = kwargs.get('availability_zone', None) return conn.volume_create(**create_kwargs) # Command parity with EC2 and Azure create_volume = volume_create def volume_delete(name, **kwargs): ''' Delete block storage device ''' conn = get_conn() return conn.volume_delete(name) def volume_detach(name, **kwargs): ''' Detach block volume ''' conn = get_conn() return conn.volume_detach( name, timeout=300 ) def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 ) # Command parity with EC2 and Azure attach_volume = volume_attach def volume_create_attach(name, call=None, **kwargs): ''' Create and attach volumes to created node ''' if call == 'function': raise SaltCloudSystemExit( 'The create_attach_volumes action must be called with ' '-a or --action.' 
) if type(kwargs['volumes']) is str: volumes = salt.utils.yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] ret = [] for volume in volumes: created = False volume_dict = { 'name': volume['name'], } if 'volume_id' in volume: volume_dict['volume_id'] = volume['volume_id'] elif 'snapshot' in volume: volume_dict['snapshot'] = volume['snapshot'] else: volume_dict['size'] = volume['size'] if 'type' in volume: volume_dict['type'] = volume['type'] if 'iops' in volume: volume_dict['iops'] = volume['iops'] if 'id' not in volume_dict: created_volume = create_volume(**volume_dict) created = True volume_dict.update(created_volume) attach = attach_volume( name=volume['name'], server_name=name, device=volume.get('device', None), call='action' ) if attach: msg = ( '{0} attached to {1} (aka {2})'.format( volume_dict['id'], name, volume_dict['name'], ) ) log.info(msg) ret.append(msg) return ret # Command parity with EC2 and Azure create_attach_volumes = volume_create_attach def volume_list(**kwargs): ''' List block devices ''' conn = get_conn() return conn.volume_list() def network_list(call=None, **kwargs): ''' List private networks ''' conn = get_conn() return conn.network_list() def network_create(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.network_create(name, **kwargs) def virtual_interface_list(name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_list(name) def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name) def floating_ip_pool_list(call=None): ''' List all floating IP pools .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_pool_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_pool_list() def floating_ip_list(call=None): ''' List floating IPs .. 
versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_list action must be called with -f or --function' ) conn = get_conn() return conn.floating_ip_list() def floating_ip_create(kwargs, call=None): ''' Allocate a floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_create action must be called with -f or --function' ) if 'pool' not in kwargs: log.error('pool is required') return False conn = get_conn() return conn.floating_ip_create(kwargs['pool']) def floating_ip_delete(kwargs, call=None): ''' De-allocate floating IP .. versionadded:: 2016.3.0 ''' if call != 'function': raise SaltCloudSystemExit( 'The floating_ip_delete action must be called with -f or --function' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() return conn.floating_ip_delete(kwargs['floating_ip']) def floating_ip_disassociate(name, kwargs, call=None): ''' Disassociate a floating IP from a server .. versionadded:: 2016.3.0 ''' if call != 'action': raise SaltCloudSystemExit( 'The floating_ip_disassociate action must be called with -a of --action.' ) if 'floating_ip' not in kwargs: log.error('floating_ip is required') return False conn = get_conn() conn.floating_ip_disassociate(name, kwargs['floating_ip']) return list_nodes()[name]
saltstack/salt
salt/cloud/clouds/gogrid.py
create
python
def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False 
vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret
Create a single VM from a data dict
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L82-L189
[ "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n", "def is_profile_configured(opts, provider, profile_name, vm_=None):\n '''\n Check if the requested profile contains the minimum required parameters for\n a profile.\n\n Required parameters include image and provider for all drivers, while some\n drivers also require size keys.\n\n .. 
versionadded:: 2015.8.0\n '''\n # Standard dict keys required by all drivers.\n required_keys = ['provider']\n alias, driver = provider.split(':')\n\n # Most drivers need an image to be specified, but some do not.\n non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone', 'profitbricks']\n\n # Most drivers need a size, but some do not.\n non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',\n 'softlayer', 'softlayer_hw', 'vmware', 'vsphere',\n 'virtualbox', 'libvirt', 'oneandone', 'profitbricks']\n\n provider_key = opts['providers'][alias][driver]\n profile_key = opts['providers'][alias][driver]['profiles'][profile_name]\n\n # If cloning on Linode, size and image are not necessary.\n # They are obtained from the to-be-cloned VM.\n if driver == 'linode' and profile_key.get('clonefrom', False):\n non_image_drivers.append('linode')\n non_size_drivers.append('linode')\n elif driver == 'gce' and 'sourceImage' in six.text_type(vm_.get('ex_disks_gce_struct')):\n non_image_drivers.append('gce')\n\n # If cloning on VMware, specifying image is not necessary.\n if driver == 'vmware' and 'image' not in list(profile_key.keys()):\n non_image_drivers.append('vmware')\n\n if driver not in non_image_drivers:\n required_keys.append('image')\n if driver == 'vmware':\n required_keys.append('datastore')\n elif driver in ['linode', 'virtualbox']:\n required_keys.append('clonefrom')\n elif driver == 'nova':\n nova_image_keys = ['image', 'block_device_mapping', 'block_device', 'boot_volume']\n if not any([key in provider_key for key in nova_image_keys]) and not any([key in profile_key for key in nova_image_keys]):\n required_keys.extend(nova_image_keys)\n\n if driver not in non_size_drivers:\n required_keys.append('size')\n\n # Check if required fields are supplied in the provider config. 
If they\n # are present, remove it from the required_keys list.\n for item in list(required_keys):\n if item in provider_key:\n required_keys.remove(item)\n\n # If a vm_ dict was passed in, use that information to get any other configs\n # that we might have missed thus far, such as a option provided in a map file.\n if vm_:\n for item in list(required_keys):\n if item in vm_:\n required_keys.remove(item)\n\n # Check for remaining required parameters in the profile config.\n for item in required_keys:\n if profile_key.get(item, None) is None:\n # There's at least one required configuration item which is not set.\n log.error(\n \"The required '%s' configuration setting is missing from \"\n \"the '%s' profile, which is configured under the '%s' alias.\",\n item, profile_name, alias\n )\n return False\n\n return True\n", "def avail_images():\n '''\n Available images\n '''\n response = _query('grid', 'image/list')\n\n ret = {}\n for item in response['list']:\n name = item['friendlyName']\n ret[name] = item\n\n return ret\n", "def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None):\n '''\n Make a web call to GoGrid\n\n .. 
versionadded:: 2015.8.0\n '''\n vm_ = get_configured_provider()\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n sharedsecret = config.get_cloud_config_value(\n 'sharedsecret', vm_, __opts__, search_global=False\n )\n\n path = 'https://api.gogrid.com/api/'\n\n if action:\n path += action\n\n if command:\n path += '/{0}'.format(command)\n\n log.debug('GoGrid URL: %s', path)\n\n if not isinstance(args, dict):\n args = {}\n\n epoch = six.text_type(int(time.time()))\n hashtext = ''.join((apikey, sharedsecret, epoch))\n args['sig'] = salt.utils.hashutils.md5_digest(hashtext)\n args['format'] = 'json'\n args['v'] = '1.0'\n args['api_key'] = apikey\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n return_content = None\n result = salt.utils.http.query(\n path,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n opts=__opts__,\n )\n log.debug('GoGrid Response Status Code: %s', result['status'])\n\n return result['dict']\n", "def wait_for_fun(fun, timeout=900, **kwargs):\n '''\n Wait until a function finishes, or times out\n '''\n start = time.time()\n log.debug('Attempting function %s', fun)\n trycount = 0\n while True:\n trycount += 1\n try:\n response = fun(**kwargs)\n if not isinstance(response, bool):\n return response\n except Exception as exc:\n log.debug('Caught exception in wait_for_fun: %s', exc)\n time.sleep(1)\n log.debug('Retrying function %s on (try %s)', fun, trycount)\n if time.time() - start > timeout:\n log.error('Function timed out: %s', timeout)\n return False\n", "def list_public_ips(kwargs=None, call=None):\n '''\n List all available public IPs.\n\n CLI Example:\n .. code-block:: bash\n\n salt-cloud -f list_public_ips <provider>\n\n To list unavailable (assigned) IPs, use:\n\n CLI Example:\n .. 
code-block:: bash\n\n salt-cloud -f list_public_ips <provider> state=assigned\n\n .. versionadded:: 2015.8.0\n '''\n if kwargs is None:\n kwargs = {}\n\n args = {}\n if 'state' in kwargs:\n if kwargs['state'] == 'assigned':\n args['ip.state'] = 'Assigned'\n else:\n args['ip.state'] = 'Unassigned'\n else:\n args['ip.state'] = 'Unassigned'\n\n args['ip.type'] = 'Public'\n\n response = _query('grid', 'ip/list', args=args)\n\n ret = {}\n for item in response['list']:\n name = item['ip']\n ret[name] = item\n\n return ret\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. 
versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. 
versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
list_nodes
python
def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret
List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L192-L217
[ "def list_nodes_full(call=None):\n '''\n List nodes, with all available information\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -F\n '''\n response = _query('grid', 'server/list')\n\n ret = {}\n for item in response['list']:\n name = item['name']\n ret[name] = item\n\n ret[name]['image_info'] = item['image']\n ret[name]['image'] = item['image']['friendlyName']\n ret[name]['size'] = item['ram']['name']\n ret[name]['public_ips'] = [item['ip']['ip']]\n ret[name]['private_ips'] = []\n ret[name]['state_info'] = item['state']\n if 'active' in item['state']['description']:\n ret[name]['state'] = 'RUNNING'\n\n return ret\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. 
versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. 
versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
list_nodes_full
python
def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret
List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L220-L246
[ "def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None):\n '''\n Make a web call to GoGrid\n\n .. versionadded:: 2015.8.0\n '''\n vm_ = get_configured_provider()\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n sharedsecret = config.get_cloud_config_value(\n 'sharedsecret', vm_, __opts__, search_global=False\n )\n\n path = 'https://api.gogrid.com/api/'\n\n if action:\n path += action\n\n if command:\n path += '/{0}'.format(command)\n\n log.debug('GoGrid URL: %s', path)\n\n if not isinstance(args, dict):\n args = {}\n\n epoch = six.text_type(int(time.time()))\n hashtext = ''.join((apikey, sharedsecret, epoch))\n args['sig'] = salt.utils.hashutils.md5_digest(hashtext)\n args['format'] = 'json'\n args['v'] = '1.0'\n args['api_key'] = apikey\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n return_content = None\n result = salt.utils.http.query(\n path,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n opts=__opts__,\n )\n log.debug('GoGrid Response Status Code: %s', result['status'])\n\n return result['dict']\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. 
versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. 
versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
avail_locations
python
def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret
Available locations
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L264-L275
[ "def list_common_lookups(kwargs=None, call=None):\n '''\n List common lookups for a particular type of item\n\n .. versionadded:: 2015.8.0\n '''\n if kwargs is None:\n kwargs = {}\n\n args = {}\n if 'lookup' in kwargs:\n args['lookup'] = kwargs['lookup']\n\n response = _query('common', 'lookup/list', args=args)\n\n return response\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. 
code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. 
versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
avail_sizes
python
def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret
Available sizes
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L278-L289
[ "def list_common_lookups(kwargs=None, call=None):\n '''\n List common lookups for a particular type of item\n\n .. versionadded:: 2015.8.0\n '''\n if kwargs is None:\n kwargs = {}\n\n args = {}\n if 'lookup' in kwargs:\n args['lookup'] = kwargs['lookup']\n\n response = _query('common', 'lookup/list', args=args)\n\n return response\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. 
code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. 
versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
avail_images
python
def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret
Available images
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L292-L303
[ "def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None):\n '''\n Make a web call to GoGrid\n\n .. versionadded:: 2015.8.0\n '''\n vm_ = get_configured_provider()\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n sharedsecret = config.get_cloud_config_value(\n 'sharedsecret', vm_, __opts__, search_global=False\n )\n\n path = 'https://api.gogrid.com/api/'\n\n if action:\n path += action\n\n if command:\n path += '/{0}'.format(command)\n\n log.debug('GoGrid URL: %s', path)\n\n if not isinstance(args, dict):\n args = {}\n\n epoch = six.text_type(int(time.time()))\n hashtext = ''.join((apikey, sharedsecret, epoch))\n args['sig'] = salt.utils.hashutils.md5_digest(hashtext)\n args['format'] = 'json'\n args['v'] = '1.0'\n args['api_key'] = apikey\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n return_content = None\n result = salt.utils.http.query(\n path,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n opts=__opts__,\n )\n log.debug('GoGrid Response Status Code: %s', result['status'])\n\n return result['dict']\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. 
code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. 
versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
list_passwords
python
def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret
List all password on the account .. versionadded:: 2015.8.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L306-L322
[ "def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None):\n '''\n Make a web call to GoGrid\n\n .. versionadded:: 2015.8.0\n '''\n vm_ = get_configured_provider()\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n sharedsecret = config.get_cloud_config_value(\n 'sharedsecret', vm_, __opts__, search_global=False\n )\n\n path = 'https://api.gogrid.com/api/'\n\n if action:\n path += action\n\n if command:\n path += '/{0}'.format(command)\n\n log.debug('GoGrid URL: %s', path)\n\n if not isinstance(args, dict):\n args = {}\n\n epoch = six.text_type(int(time.time()))\n hashtext = ''.join((apikey, sharedsecret, epoch))\n args['sig'] = salt.utils.hashutils.md5_digest(hashtext)\n args['format'] = 'json'\n args['v'] = '1.0'\n args['api_key'] = apikey\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n return_content = None\n result = salt.utils.http.query(\n path,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n opts=__opts__,\n )\n log.debug('GoGrid Response Status Code: %s', result['status'])\n\n return result['dict']\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. 
versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
list_public_ips
python
def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret
List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L325-L364
[ "def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None):\n '''\n Make a web call to GoGrid\n\n .. versionadded:: 2015.8.0\n '''\n vm_ = get_configured_provider()\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n sharedsecret = config.get_cloud_config_value(\n 'sharedsecret', vm_, __opts__, search_global=False\n )\n\n path = 'https://api.gogrid.com/api/'\n\n if action:\n path += action\n\n if command:\n path += '/{0}'.format(command)\n\n log.debug('GoGrid URL: %s', path)\n\n if not isinstance(args, dict):\n args = {}\n\n epoch = six.text_type(int(time.time()))\n hashtext = ''.join((apikey, sharedsecret, epoch))\n args['sig'] = salt.utils.hashutils.md5_digest(hashtext)\n args['format'] = 'json'\n args['v'] = '1.0'\n args['api_key'] = apikey\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n return_content = None\n result = salt.utils.http.query(\n path,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n opts=__opts__,\n )\n log.debug('GoGrid Response Status Code: %s', result['status'])\n\n return result['dict']\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. 
versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
list_common_lookups
python
def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response
List common lookups for a particular type of item .. versionadded:: 2015.8.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L367-L382
[ "def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None):\n '''\n Make a web call to GoGrid\n\n .. versionadded:: 2015.8.0\n '''\n vm_ = get_configured_provider()\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n sharedsecret = config.get_cloud_config_value(\n 'sharedsecret', vm_, __opts__, search_global=False\n )\n\n path = 'https://api.gogrid.com/api/'\n\n if action:\n path += action\n\n if command:\n path += '/{0}'.format(command)\n\n log.debug('GoGrid URL: %s', path)\n\n if not isinstance(args, dict):\n args = {}\n\n epoch = six.text_type(int(time.time()))\n hashtext = ''.join((apikey, sharedsecret, epoch))\n args['sig'] = salt.utils.hashutils.md5_digest(hashtext)\n args['format'] = 'json'\n args['v'] = '1.0'\n args['api_key'] = apikey\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n return_content = None\n result = salt.utils.http.query(\n path,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n opts=__opts__,\n )\n log.debug('GoGrid Response Status Code: %s', result['status'])\n\n return result['dict']\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' 
) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. 
versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
destroy
python
def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response
Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L385-L424
[ "def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None):\n '''\n Make a web call to GoGrid\n\n .. versionadded:: 2015.8.0\n '''\n vm_ = get_configured_provider()\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n sharedsecret = config.get_cloud_config_value(\n 'sharedsecret', vm_, __opts__, search_global=False\n )\n\n path = 'https://api.gogrid.com/api/'\n\n if action:\n path += action\n\n if command:\n path += '/{0}'.format(command)\n\n log.debug('GoGrid URL: %s', path)\n\n if not isinstance(args, dict):\n args = {}\n\n epoch = six.text_type(int(time.time()))\n hashtext = ''.join((apikey, sharedsecret, epoch))\n args['sig'] = salt.utils.hashutils.md5_digest(hashtext)\n args['format'] = 'json'\n args['v'] = '1.0'\n args['api_key'] = apikey\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n return_content = None\n result = salt.utils.http.query(\n path,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n opts=__opts__,\n )\n log.debug('GoGrid Response Status Code: %s', result['status'])\n\n return result['dict']\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. 
versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. 
versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
show_instance
python
def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret
Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. versionadded:: 2015.8.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L472-L498
[ "def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None):\n '''\n Make a web call to GoGrid\n\n .. versionadded:: 2015.8.0\n '''\n vm_ = get_configured_provider()\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n sharedsecret = config.get_cloud_config_value(\n 'sharedsecret', vm_, __opts__, search_global=False\n )\n\n path = 'https://api.gogrid.com/api/'\n\n if action:\n path += action\n\n if command:\n path += '/{0}'.format(command)\n\n log.debug('GoGrid URL: %s', path)\n\n if not isinstance(args, dict):\n args = {}\n\n epoch = six.text_type(int(time.time()))\n hashtext = ''.join((apikey, sharedsecret, epoch))\n args['sig'] = salt.utils.hashutils.md5_digest(hashtext)\n args['format'] = 'json'\n args['v'] = '1.0'\n args['api_key'] = apikey\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n return_content = None\n result = salt.utils.http.query(\n path,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n opts=__opts__,\n )\n log.debug('GoGrid Response Status Code: %s', result['status'])\n\n return result['dict']\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. 
versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. 
versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
saltstack/salt
salt/cloud/clouds/gogrid.py
_query
python
def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None): ''' Make a web call to GoGrid .. versionadded:: 2015.8.0 ''' vm_ = get_configured_provider() apikey = config.get_cloud_config_value( 'apikey', vm_, __opts__, search_global=False ) sharedsecret = config.get_cloud_config_value( 'sharedsecret', vm_, __opts__, search_global=False ) path = 'https://api.gogrid.com/api/' if action: path += action if command: path += '/{0}'.format(command) log.debug('GoGrid URL: %s', path) if not isinstance(args, dict): args = {} epoch = six.text_type(int(time.time())) hashtext = ''.join((apikey, sharedsecret, epoch)) args['sig'] = salt.utils.hashutils.md5_digest(hashtext) args['format'] = 'json' args['v'] = '1.0' args['api_key'] = apikey if header_dict is None: header_dict = {} if method != 'POST': header_dict['Accept'] = 'application/json' decode = True if method == 'DELETE': decode = False return_content = None result = salt.utils.http.query( path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__, ) log.debug('GoGrid Response Status Code: %s', result['status']) return result['dict']
Make a web call to GoGrid .. versionadded:: 2015.8.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L501-L565
[ "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n", "def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('apikey', 'sharedsecret')\n )\n" ]
# -*- coding: utf-8 -*- ''' GoGrid Cloud Module ==================== The GoGrid cloud module. This module interfaces with the gogrid public cloud service. To use Salt Cloud with GoGrid log into the GoGrid web interface and create an api key. Do this by clicking on "My Account" and then going to the API Keys tab. Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/gogrid.conf``: .. code-block:: yaml my-gogrid-config: # The generated api key to use apikey: asdff7896asdh789 # The apikey's shared secret sharedsecret: saltybacon driver: gogrid .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. .. note:: A Note about using Map files with GoGrid: Due to limitations in the GoGrid API, instances cannot be provisioned in parallel with the GoGrid driver. Map files will work with GoGrid, but the ``-P`` argument should not be used on maps referencing GoGrid instances. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import pprint import logging import time # Import salt cloud libs import salt.config as config import salt.utils.cloud import salt.utils.hashutils from salt.exceptions import SaltCloudSystemExit, SaltCloudException from salt.ext import six # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'gogrid' # Only load in this module if the GoGrid configurations are in place def __virtual__(): ''' Check for GoGrid configs ''' if get_configured_provider() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. 
''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('apikey', 'sharedsecret') ) def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'gogrid', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if len(vm_['name']) > 20: raise SaltCloudException('VM names must not be longer than 20 characters') log.info('Creating Cloud VM %s', vm_['name']) image_id = avail_images()[vm_['image']]['id'] if 'assign_public_ip' in vm_: host_ip = vm_['assign_public_ip'] else: public_ips = list_public_ips() if not public_ips: raise SaltCloudException('No more IPs available') host_ip = next(iter(public_ips)) create_kwargs = { 'name': vm_['name'], 'image': image_id, 'ram': vm_['size'], 'ip': host_ip, } __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', create_kwargs, list(create_kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _query('grid', 'server/add', args=create_kwargs) except Exception: log.error( 'Error creating %s on GOGRID\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n', vm_['name'], # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) def wait_for_apipass(): ''' Wait for the password to become available, via the API ''' try: 
passwords = list_passwords() return passwords[vm_['name']][0]['password'] except KeyError: pass time.sleep(5) return False vm_['password'] = salt.utils.cloud.wait_for_fun( wait_for_apipass, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['ssh_host'] = host_ip ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret def list_nodes(full=False, call=None): ''' List of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} nodes = list_nodes_full('function') if full: return nodes for node in nodes: ret[node] = {} for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'): ret[node][item] = nodes[node][item] return ret def list_nodes_full(call=None): ''' List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F ''' response = _query('grid', 'server/list') ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S ''' return salt.utils.cloud.list_nodes_select( list_nodes_full('function'), __opts__['query.selection'], call, ) def avail_locations(): ''' Available locations ''' response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_sizes(): ''' Available sizes ''' response = list_common_lookups(kwargs={'lookup': 'server.ram'}) ret = {} for item in response['list']: name = item['name'] ret[name] = item return ret def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret def list_passwords(kwargs=None, call=None): ''' List all password on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret def list_public_ips(kwargs=None, call=None): ''' List all available public IPs. CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> To list unavailable (assigned) IPs, use: CLI Example: .. code-block:: bash salt-cloud -f list_public_ips <provider> state=assigned .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'state' in kwargs: if kwargs['state'] == 'assigned': args['ip.state'] = 'Assigned' else: args['ip.state'] = 'Unassigned' else: args['ip.state'] = 'Unassigned' args['ip.type'] = 'Public' response = _query('grid', 'ip/list', args=args) ret = {} for item in response['list']: name = item['ip'] ret[name] = item return ret def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. 
versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response def destroy(name, call=None): ''' Destroy a machine by name CLI Example: .. code-block:: bash salt-cloud -d vm_name ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) response = _query('grid', 'server/delete', args={'name': name}) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return response def reboot(name, call=None): ''' Reboot a machine by name CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'restart'}) def stop(name, call=None): ''' Stop a machine by name CLI Example: .. code-block:: bash salt-cloud -a stop vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'stop'}) def start(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a start vm_name .. versionadded:: 2015.8.0 ''' return _query('grid', 'server/power', args={'name': name, 'power': 'start'}) def show_instance(name, call=None): ''' Start a machine by name CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. 
versionadded:: 2015.8.0 ''' response = _query('grid', 'server/get', args={'name': name}) ret = {} for item in response['list']: name = item['name'] ret[name] = item ret[name]['image_info'] = item['image'] ret[name]['image'] = item['image']['friendlyName'] ret[name]['size'] = item['ram']['name'] ret[name]['public_ips'] = [item['ip']['ip']] ret[name]['private_ips'] = [] ret[name]['state_info'] = item['state'] if 'active' in item['state']['description']: ret[name]['state'] = 'RUNNING' return ret
saltstack/salt
salt/states/reg.py
_parse_key
python
def _parse_key(key): ''' split the hive from the key ''' splt = key.split("\\") hive = splt.pop(0) key = '\\'.join(splt) return hive, key
split the hive from the key
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/reg.py#L104-L111
null
# -*- coding: utf-8 -*- r''' Manage the Windows registry =========================== Many python developers think of registry keys as if they were python keys in a dictionary which is not the case. The windows registry is broken down into the following components: Hives ----- This is the top level of the registry. They all begin with HKEY. - HKEY_CLASSES_ROOT (HKCR) - HKEY_CURRENT_USER(HKCU) - HKEY_LOCAL MACHINE (HKLM) - HKEY_USER (HKU) - HKEY_CURRENT_CONFIG Keys ---- Hives contain keys. These are basically the folders beneath the hives. They can contain any number of subkeys. When passing the hive\key values they must be quoted correctly depending on the backslashes being used (``\`` vs ``\\``). The way backslashes are handled in the state file is different from the way they are handled when working on the CLI. The following are valid methods of passing the hive\key: Using single backslashes: HKLM\SOFTWARE\Python 'HKLM\SOFTWARE\Python' Using double backslashes: "HKLM\\SOFTWARE\\Python" Values or Entries ----------------- Values or Entries are the name/data pairs beneath the keys and subkeys. All keys have a default name/data pair. The name is ``(Default)`` with a displayed value of ``(value not set)``. The actual value is Null. Example ------- The following example is taken from the windows startup portion of the registry: .. 
code-block:: text [HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run] "RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s" "NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\"" "BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp" In this example these are the values for each: Hive: ``HKEY_LOCAL_MACHINE`` Key and subkeys: ``SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run`` Value: - There are 3 value names: ``RTHDVCPL``, ``NvBackend``, and ``BTMTrayAgent`` - Each value name has a corresponding value ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.stringutils log = logging.getLogger(__name__) def __virtual__(): ''' Load this state if the reg module exists ''' if 'reg.read_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.read_value') if 'reg.set_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.set_value') if 'reg.delete_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.delete_value') if 'reg.delete_key_recursive' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.delete_key_recursive') return 'reg' def present(name, vname=None, vdata=None, vtype='REG_SZ', use_32bit_registry=False, win_owner=None, win_perms=None, win_deny_perms=None, win_inheritance=True, win_perms_reset=False): r''' Ensure a registry key or value is present. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. 
If this parameter is not passed it will assume you want to set the ``(Default)`` value vdata (str, int, list, bytes): The value you'd like to set. If a value name (``vname``) is passed, this will be the data for that value name. If not, this will be the ``(Default)`` value for the key. The type of data this parameter expects is determined by the value type specified in ``vtype``. The correspondence is as follows: - REG_BINARY: Binary data (str in Py2, bytes in Py3) - REG_DWORD: int - REG_EXPAND_SZ: str - REG_MULTI_SZ: list of str - REG_QWORD: int - REG_SZ: str .. note:: When setting REG_BINARY, string data will be converted to binary automatically. To pass binary data, use the built-in yaml tag ``!!binary`` to denote the actual binary characters. For example, the following lines will both set the same data in the registry: - ``vdata: Salty Test`` - ``vdata: !!binary U2FsdHkgVGVzdA==\n`` For more information about the ``!!binary`` tag see `here <http://yaml.org/type/binary.html>`_ .. note:: The type for the ``(Default)`` value is always REG_SZ and cannot be changed. This parameter is optional. If not passed, the Key will be created with no associated item/value pairs. vtype (str): The value type for the data you wish to store in the registry. Valid values are: - REG_BINARY - REG_DWORD - REG_EXPAND_SZ - REG_MULTI_SZ - REG_QWORD - REG_SZ (Default) use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. win_owner (str): The owner of the registry key. If this is not passed, the account under which Salt is running will be used. .. note:: Owner is set for the key that contains the value/data pair. You cannot set ownership on value/data pairs themselves. .. versionadded:: 2019.2.0 win_perms (dict): A dictionary containing permissions to grant and their propagation. If not passed the 'Grant` permissions will not be modified. .. 
note:: Permissions are set for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. For each user specify the account name, with a sub dict for the permissions to grant and the 'Applies to' setting. For example: ``{'Administrators': {'perms': 'full_control', 'applies_to': 'this_key_subkeys'}}``. ``perms`` must be specified. Registry permissions are specified using the ``perms`` key. You can specify a single basic permission or a list of advanced perms. The following are valid perms: Basic (passed as a string): - full_control - read - write Advanced (passed as a list): - delete - query_value - set_value - create_subkey - enum_subkeys - notify - create_link - read_control - write_dac - write_owner The 'Applies to' setting is optional. It is specified using the ``applies_to`` key. If not specified ``this_key_subkeys`` is used. Valid options are: Applies to settings: - this_key_only - this_key_subkeys - subkeys_only .. versionadded:: 2019.2.0 win_deny_perms (dict): A dictionary containing permissions to deny and their propagation. If not passed the `Deny` permissions will not be modified. .. note:: Permissions are set for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. Valid options are the same as those specified in ``win_perms`` .. note:: 'Deny' permissions always take precedence over 'grant' permissions. .. versionadded:: 2019.2.0 win_inheritance (bool): ``True`` to inherit permissions from the parent key. ``False`` to disable inheritance. Default is ``True``. .. note:: Inheritance is set for the key that contains the value/data pair. You cannot set inheritance on value/data pairs themselves. .. versionadded:: 2019.2.0 win_perms_reset (bool): If ``True`` the existing DACL will be cleared and replaced with the settings defined in this function. If ``False``, new entries will be appended to the existing DACL. Default is ``False`` .. 
note:: Perms are reset for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. .. versionadded:: 2019.2.0 Returns: dict: A dictionary showing the results of the registry operation. Example: The following example will set the ``(Default)`` value for the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``: .. code-block:: yaml HKEY_CURRENT_USER\\SOFTWARE\\Salt: reg.present: - vdata: 2016.3.1 Example: The following example will set the value for the ``version`` entry under the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The value will be reflected in ``Wow6432Node``: .. code-block:: yaml HKEY_CURRENT_USER\\SOFTWARE\\Salt: reg.present: - vname: version - vdata: 2016.3.1 In the above example the path is interpreted as follows: - ``HKEY_CURRENT_USER`` is the hive - ``SOFTWARE\\Salt`` is the key - ``vname`` is the value name ('version') that will be created under the key - ``vdata`` is the data that will be assigned to 'version' Example: Binary data can be set in two ways. The following two examples will set a binary value of ``Salty Test`` .. code-block:: yaml no_conversion: reg.present: - name: HKLM\SOFTWARE\SaltTesting - vname: test_reg_binary_state - vdata: Salty Test - vtype: REG_BINARY conversion: reg.present: - name: HKLM\SOFTWARE\SaltTesting - vname: test_reg_binary_state_with_tag - vdata: !!binary U2FsdHkgVGVzdA==\n - vtype: REG_BINARY Example: To set a ``REG_MULTI_SZ`` value: .. code-block:: yaml reg_multi_sz: reg.present: - name: HKLM\SOFTWARE\Salt - vname: reg_multi_sz - vdata: - list item 1 - list item 2 Example: To ensure a key is present and has permissions: .. 
code-block:: yaml set_key_permissions: reg.present: - name: HKLM\SOFTWARE\Salt - vname: version - vdata: 2016.3.1 - win_owner: Administrators - win_perms: jsnuffy: perms: full_control sjones: perms: - read_control - enum_subkeys - query_value applies_to: - this_key_only - win_deny_perms: bsimpson: perms: full_control applies_to: this_key_subkeys - win_inheritance: True - win_perms_reset: True ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do reg_current = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) # Check if the key already exists # If so, check perms # We check `vdata` and `success` because `vdata` can be None if vdata == reg_current['vdata'] and reg_current['success']: ret['comment'] = '{0} in {1} is already present' \ ''.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)', salt.utils.stringutils.to_unicode(name, 'utf-8')) return __utils__['dacl.check_perms']( obj_name='\\'.join([hive, key]), obj_type='registry32' if use_32bit_registry else 'registry', ret=ret, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance, reset=win_perms_reset) # Cast the vdata according to the vtype vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype) add_change = {'Key': r'{0}\{1}'.format(hive, key), 'Entry': '{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'), 'Value': vdata_decoded, 'Owner': win_owner, 'Perms': {'Grant': win_perms, 'Deny': win_deny_perms}, 'Inheritance': win_inheritance} # Check for test option if __opts__['test']: ret['result'] = None ret['changes'] = {'reg': {'Will add': add_change}} return ret # Configure the value ret['result'] = __utils__['reg.set_value'](hive=hive, key=key, vname=vname, vdata=vdata, vtype=vtype, use_32bit_registry=use_32bit_registry) if not ret['result']: ret['changes'] = {} ret['comment'] = 
r'Failed to add {0} to {1}\{2}'.format(name, hive, key) else: ret['changes'] = {'reg': {'Added': add_change}} ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key) if ret['result']: ret = __utils__['dacl.check_perms']( obj_name='\\'.join([hive, key]), obj_type='registry32' if use_32bit_registry else 'registry', ret=ret, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance, reset=win_perms_reset) return ret def absent(name, vname=None, use_32bit_registry=False): r''' Ensure a registry value is removed. To remove a key use key_absent. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: .. code-block:: yaml 'HKEY_CURRENT_USER\\SOFTWARE\\Salt': reg.absent - vname: version In the above example the value named ``version`` will be removed from the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not passed, the ``(Default)`` value would be deleted. 
''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do reg_check = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) if not reg_check['success'] or reg_check['vdata'] == '(value not set)': ret['comment'] = '{0} is already absent'.format(name) return ret remove_change = {'Key': r'{0}\{1}'.format(hive, key), 'Entry': '{0}'.format(vname if vname else '(Default)')} # Check for test option if __opts__['test']: ret['result'] = None ret['changes'] = {'reg': {'Will remove': remove_change}} return ret # Delete the value ret['result'] = __utils__['reg.delete_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) if not ret['result']: ret['changes'] = {} ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive) else: ret['changes'] = {'reg': {'Removed': remove_change}} ret['comment'] = r'Removed {0} from {1}'.format(key, hive) return ret def key_absent(name, use_32bit_registry=False): r''' .. versionadded:: 2015.5.4 Ensure a registry key is removed. This will remove the key, subkeys, and all value entries. Args: name (str): A string representing the full path to the key to be removed to include the hive and the keypath. The hive can be any of the following: - HKEY_LOCAL_MACHINE or HKLM - HKEY_CURRENT_USER or HKCU - HKEY_USER or HKU use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: The following example will delete the ``SOFTWARE\DeleteMe`` key in the ``HKEY_LOCAL_MACHINE`` hive including all its subkeys and value pairs. .. 
code-block:: yaml remove_key_demo: reg.key_absent: - name: HKEY_CURRENT_USER\SOFTWARE\DeleteMe In the above example the path is interpreted as follows: - ``HKEY_CURRENT_USER`` is the hive - ``SOFTWARE\DeleteMe`` is the key ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do if not __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']: ret['comment'] = '{0} is already absent'.format(name) return ret ret['changes'] = { 'reg': { 'Removed': { 'Key': r'{0}\{1}'.format(hive, key)}}} # Check for test option if __opts__['test']: ret['result'] = None return ret # Delete the value __utils__['reg.delete_key_recursive'](hive=hive, key=key, use_32bit_registry=use_32bit_registry) if __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']: ret['result'] = False ret['changes'] = {} ret['comment'] = 'Failed to remove registry key {0}'.format(name) return ret
saltstack/salt
salt/states/reg.py
present
python
def present(name, vname=None, vdata=None, vtype='REG_SZ', use_32bit_registry=False, win_owner=None, win_perms=None, win_deny_perms=None, win_inheritance=True, win_perms_reset=False): r''' Ensure a registry key or value is present. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value vdata (str, int, list, bytes): The value you'd like to set. If a value name (``vname``) is passed, this will be the data for that value name. If not, this will be the ``(Default)`` value for the key. The type of data this parameter expects is determined by the value type specified in ``vtype``. The correspondence is as follows: - REG_BINARY: Binary data (str in Py2, bytes in Py3) - REG_DWORD: int - REG_EXPAND_SZ: str - REG_MULTI_SZ: list of str - REG_QWORD: int - REG_SZ: str .. note:: When setting REG_BINARY, string data will be converted to binary automatically. To pass binary data, use the built-in yaml tag ``!!binary`` to denote the actual binary characters. For example, the following lines will both set the same data in the registry: - ``vdata: Salty Test`` - ``vdata: !!binary U2FsdHkgVGVzdA==\n`` For more information about the ``!!binary`` tag see `here <http://yaml.org/type/binary.html>`_ .. note:: The type for the ``(Default)`` value is always REG_SZ and cannot be changed. This parameter is optional. If not passed, the Key will be created with no associated item/value pairs. vtype (str): The value type for the data you wish to store in the registry. 
Valid values are: - REG_BINARY - REG_DWORD - REG_EXPAND_SZ - REG_MULTI_SZ - REG_QWORD - REG_SZ (Default) use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. win_owner (str): The owner of the registry key. If this is not passed, the account under which Salt is running will be used. .. note:: Owner is set for the key that contains the value/data pair. You cannot set ownership on value/data pairs themselves. .. versionadded:: 2019.2.0 win_perms (dict): A dictionary containing permissions to grant and their propagation. If not passed the 'Grant` permissions will not be modified. .. note:: Permissions are set for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. For each user specify the account name, with a sub dict for the permissions to grant and the 'Applies to' setting. For example: ``{'Administrators': {'perms': 'full_control', 'applies_to': 'this_key_subkeys'}}``. ``perms`` must be specified. Registry permissions are specified using the ``perms`` key. You can specify a single basic permission or a list of advanced perms. The following are valid perms: Basic (passed as a string): - full_control - read - write Advanced (passed as a list): - delete - query_value - set_value - create_subkey - enum_subkeys - notify - create_link - read_control - write_dac - write_owner The 'Applies to' setting is optional. It is specified using the ``applies_to`` key. If not specified ``this_key_subkeys`` is used. Valid options are: Applies to settings: - this_key_only - this_key_subkeys - subkeys_only .. versionadded:: 2019.2.0 win_deny_perms (dict): A dictionary containing permissions to deny and their propagation. If not passed the `Deny` permissions will not be modified. .. note:: Permissions are set for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. 
Valid options are the same as those specified in ``win_perms`` .. note:: 'Deny' permissions always take precedence over 'grant' permissions. .. versionadded:: 2019.2.0 win_inheritance (bool): ``True`` to inherit permissions from the parent key. ``False`` to disable inheritance. Default is ``True``. .. note:: Inheritance is set for the key that contains the value/data pair. You cannot set inheritance on value/data pairs themselves. .. versionadded:: 2019.2.0 win_perms_reset (bool): If ``True`` the existing DACL will be cleared and replaced with the settings defined in this function. If ``False``, new entries will be appended to the existing DACL. Default is ``False`` .. note:: Perms are reset for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. .. versionadded:: 2019.2.0 Returns: dict: A dictionary showing the results of the registry operation. Example: The following example will set the ``(Default)`` value for the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``: .. code-block:: yaml HKEY_CURRENT_USER\\SOFTWARE\\Salt: reg.present: - vdata: 2016.3.1 Example: The following example will set the value for the ``version`` entry under the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The value will be reflected in ``Wow6432Node``: .. code-block:: yaml HKEY_CURRENT_USER\\SOFTWARE\\Salt: reg.present: - vname: version - vdata: 2016.3.1 In the above example the path is interpreted as follows: - ``HKEY_CURRENT_USER`` is the hive - ``SOFTWARE\\Salt`` is the key - ``vname`` is the value name ('version') that will be created under the key - ``vdata`` is the data that will be assigned to 'version' Example: Binary data can be set in two ways. The following two examples will set a binary value of ``Salty Test`` .. 
code-block:: yaml no_conversion: reg.present: - name: HKLM\SOFTWARE\SaltTesting - vname: test_reg_binary_state - vdata: Salty Test - vtype: REG_BINARY conversion: reg.present: - name: HKLM\SOFTWARE\SaltTesting - vname: test_reg_binary_state_with_tag - vdata: !!binary U2FsdHkgVGVzdA==\n - vtype: REG_BINARY Example: To set a ``REG_MULTI_SZ`` value: .. code-block:: yaml reg_multi_sz: reg.present: - name: HKLM\SOFTWARE\Salt - vname: reg_multi_sz - vdata: - list item 1 - list item 2 Example: To ensure a key is present and has permissions: .. code-block:: yaml set_key_permissions: reg.present: - name: HKLM\SOFTWARE\Salt - vname: version - vdata: 2016.3.1 - win_owner: Administrators - win_perms: jsnuffy: perms: full_control sjones: perms: - read_control - enum_subkeys - query_value applies_to: - this_key_only - win_deny_perms: bsimpson: perms: full_control applies_to: this_key_subkeys - win_inheritance: True - win_perms_reset: True ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do reg_current = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) # Check if the key already exists # If so, check perms # We check `vdata` and `success` because `vdata` can be None if vdata == reg_current['vdata'] and reg_current['success']: ret['comment'] = '{0} in {1} is already present' \ ''.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)', salt.utils.stringutils.to_unicode(name, 'utf-8')) return __utils__['dacl.check_perms']( obj_name='\\'.join([hive, key]), obj_type='registry32' if use_32bit_registry else 'registry', ret=ret, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance, reset=win_perms_reset) # Cast the vdata according to the vtype vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype) add_change = {'Key': r'{0}\{1}'.format(hive, key), 'Entry': 
'{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'), 'Value': vdata_decoded, 'Owner': win_owner, 'Perms': {'Grant': win_perms, 'Deny': win_deny_perms}, 'Inheritance': win_inheritance} # Check for test option if __opts__['test']: ret['result'] = None ret['changes'] = {'reg': {'Will add': add_change}} return ret # Configure the value ret['result'] = __utils__['reg.set_value'](hive=hive, key=key, vname=vname, vdata=vdata, vtype=vtype, use_32bit_registry=use_32bit_registry) if not ret['result']: ret['changes'] = {} ret['comment'] = r'Failed to add {0} to {1}\{2}'.format(name, hive, key) else: ret['changes'] = {'reg': {'Added': add_change}} ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key) if ret['result']: ret = __utils__['dacl.check_perms']( obj_name='\\'.join([hive, key]), obj_type='registry32' if use_32bit_registry else 'registry', ret=ret, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance, reset=win_perms_reset) return ret
r''' Ensure a registry key or value is present. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value vdata (str, int, list, bytes): The value you'd like to set. If a value name (``vname``) is passed, this will be the data for that value name. If not, this will be the ``(Default)`` value for the key. The type of data this parameter expects is determined by the value type specified in ``vtype``. The correspondence is as follows: - REG_BINARY: Binary data (str in Py2, bytes in Py3) - REG_DWORD: int - REG_EXPAND_SZ: str - REG_MULTI_SZ: list of str - REG_QWORD: int - REG_SZ: str .. note:: When setting REG_BINARY, string data will be converted to binary automatically. To pass binary data, use the built-in yaml tag ``!!binary`` to denote the actual binary characters. For example, the following lines will both set the same data in the registry: - ``vdata: Salty Test`` - ``vdata: !!binary U2FsdHkgVGVzdA==\n`` For more information about the ``!!binary`` tag see `here <http://yaml.org/type/binary.html>`_ .. note:: The type for the ``(Default)`` value is always REG_SZ and cannot be changed. This parameter is optional. If not passed, the Key will be created with no associated item/value pairs. vtype (str): The value type for the data you wish to store in the registry. Valid values are: - REG_BINARY - REG_DWORD - REG_EXPAND_SZ - REG_MULTI_SZ - REG_QWORD - REG_SZ (Default) use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. win_owner (str): The owner of the registry key. 
If this is not passed, the account under which Salt is running will be used. .. note:: Owner is set for the key that contains the value/data pair. You cannot set ownership on value/data pairs themselves. .. versionadded:: 2019.2.0 win_perms (dict): A dictionary containing permissions to grant and their propagation. If not passed the 'Grant` permissions will not be modified. .. note:: Permissions are set for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. For each user specify the account name, with a sub dict for the permissions to grant and the 'Applies to' setting. For example: ``{'Administrators': {'perms': 'full_control', 'applies_to': 'this_key_subkeys'}}``. ``perms`` must be specified. Registry permissions are specified using the ``perms`` key. You can specify a single basic permission or a list of advanced perms. The following are valid perms: Basic (passed as a string): - full_control - read - write Advanced (passed as a list): - delete - query_value - set_value - create_subkey - enum_subkeys - notify - create_link - read_control - write_dac - write_owner The 'Applies to' setting is optional. It is specified using the ``applies_to`` key. If not specified ``this_key_subkeys`` is used. Valid options are: Applies to settings: - this_key_only - this_key_subkeys - subkeys_only .. versionadded:: 2019.2.0 win_deny_perms (dict): A dictionary containing permissions to deny and their propagation. If not passed the `Deny` permissions will not be modified. .. note:: Permissions are set for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. Valid options are the same as those specified in ``win_perms`` .. note:: 'Deny' permissions always take precedence over 'grant' permissions. .. versionadded:: 2019.2.0 win_inheritance (bool): ``True`` to inherit permissions from the parent key. ``False`` to disable inheritance. Default is ``True``. .. 
note:: Inheritance is set for the key that contains the value/data pair. You cannot set inheritance on value/data pairs themselves. .. versionadded:: 2019.2.0 win_perms_reset (bool): If ``True`` the existing DACL will be cleared and replaced with the settings defined in this function. If ``False``, new entries will be appended to the existing DACL. Default is ``False`` .. note:: Perms are reset for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. .. versionadded:: 2019.2.0 Returns: dict: A dictionary showing the results of the registry operation. Example: The following example will set the ``(Default)`` value for the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``: .. code-block:: yaml HKEY_CURRENT_USER\\SOFTWARE\\Salt: reg.present: - vdata: 2016.3.1 Example: The following example will set the value for the ``version`` entry under the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The value will be reflected in ``Wow6432Node``: .. code-block:: yaml HKEY_CURRENT_USER\\SOFTWARE\\Salt: reg.present: - vname: version - vdata: 2016.3.1 In the above example the path is interpreted as follows: - ``HKEY_CURRENT_USER`` is the hive - ``SOFTWARE\\Salt`` is the key - ``vname`` is the value name ('version') that will be created under the key - ``vdata`` is the data that will be assigned to 'version' Example: Binary data can be set in two ways. The following two examples will set a binary value of ``Salty Test`` .. code-block:: yaml no_conversion: reg.present: - name: HKLM\SOFTWARE\SaltTesting - vname: test_reg_binary_state - vdata: Salty Test - vtype: REG_BINARY conversion: reg.present: - name: HKLM\SOFTWARE\SaltTesting - vname: test_reg_binary_state_with_tag - vdata: !!binary U2FsdHkgVGVzdA==\n - vtype: REG_BINARY Example: To set a ``REG_MULTI_SZ`` value: .. 
code-block:: yaml reg_multi_sz: reg.present: - name: HKLM\SOFTWARE\Salt - vname: reg_multi_sz - vdata: - list item 1 - list item 2 Example: To ensure a key is present and has permissions: .. code-block:: yaml set_key_permissions: reg.present: - name: HKLM\SOFTWARE\Salt - vname: version - vdata: 2016.3.1 - win_owner: Administrators - win_perms: jsnuffy: perms: full_control sjones: perms: - read_control - enum_subkeys - query_value applies_to: - this_key_only - win_deny_perms: bsimpson: perms: full_control applies_to: this_key_subkeys - win_inheritance: True - win_perms_reset: True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/reg.py#L114-L458
[ "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n", "def _parse_key(key):\n '''\n split the hive from the key\n '''\n splt = key.split(\"\\\\\")\n hive = splt.pop(0)\n key = '\\\\'.join(splt)\n return hive, key\n" ]
# -*- coding: utf-8 -*- r''' Manage the Windows registry =========================== Many python developers think of registry keys as if they were python keys in a dictionary which is not the case. The windows registry is broken down into the following components: Hives ----- This is the top level of the registry. They all begin with HKEY. - HKEY_CLASSES_ROOT (HKCR) - HKEY_CURRENT_USER(HKCU) - HKEY_LOCAL MACHINE (HKLM) - HKEY_USER (HKU) - HKEY_CURRENT_CONFIG Keys ---- Hives contain keys. These are basically the folders beneath the hives. They can contain any number of subkeys. When passing the hive\key values they must be quoted correctly depending on the backslashes being used (``\`` vs ``\\``). The way backslashes are handled in the state file is different from the way they are handled when working on the CLI. The following are valid methods of passing the hive\key: Using single backslashes: HKLM\SOFTWARE\Python 'HKLM\SOFTWARE\Python' Using double backslashes: "HKLM\\SOFTWARE\\Python" Values or Entries ----------------- Values or Entries are the name/data pairs beneath the keys and subkeys. All keys have a default name/data pair. The name is ``(Default)`` with a displayed value of ``(value not set)``. The actual value is Null. Example ------- The following example is taken from the windows startup portion of the registry: .. 
code-block:: text [HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run] "RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s" "NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\"" "BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp" In this example these are the values for each: Hive: ``HKEY_LOCAL_MACHINE`` Key and subkeys: ``SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run`` Value: - There are 3 value names: ``RTHDVCPL``, ``NvBackend``, and ``BTMTrayAgent`` - Each value name has a corresponding value ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.stringutils log = logging.getLogger(__name__) def __virtual__(): ''' Load this state if the reg module exists ''' if 'reg.read_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.read_value') if 'reg.set_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.set_value') if 'reg.delete_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.delete_value') if 'reg.delete_key_recursive' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.delete_key_recursive') return 'reg' def _parse_key(key): ''' split the hive from the key ''' splt = key.split("\\") hive = splt.pop(0) key = '\\'.join(splt) return hive, key def absent(name, vname=None, use_32bit_registry=False): r''' Ensure a registry value is removed. To remove a key use key_absent. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. 
For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: .. code-block:: yaml 'HKEY_CURRENT_USER\\SOFTWARE\\Salt': reg.absent - vname: version In the above example the value named ``version`` will be removed from the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not passed, the ``(Default)`` value would be deleted. ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do reg_check = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) if not reg_check['success'] or reg_check['vdata'] == '(value not set)': ret['comment'] = '{0} is already absent'.format(name) return ret remove_change = {'Key': r'{0}\{1}'.format(hive, key), 'Entry': '{0}'.format(vname if vname else '(Default)')} # Check for test option if __opts__['test']: ret['result'] = None ret['changes'] = {'reg': {'Will remove': remove_change}} return ret # Delete the value ret['result'] = __utils__['reg.delete_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) if not ret['result']: ret['changes'] = {} ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive) else: ret['changes'] = {'reg': {'Removed': remove_change}} ret['comment'] = r'Removed {0} from {1}'.format(key, hive) return ret def key_absent(name, use_32bit_registry=False): r''' .. versionadded:: 2015.5.4 Ensure a registry key is removed. 
This will remove the key, subkeys, and all value entries. Args: name (str): A string representing the full path to the key to be removed to include the hive and the keypath. The hive can be any of the following: - HKEY_LOCAL_MACHINE or HKLM - HKEY_CURRENT_USER or HKCU - HKEY_USER or HKU use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: The following example will delete the ``SOFTWARE\DeleteMe`` key in the ``HKEY_LOCAL_MACHINE`` hive including all its subkeys and value pairs. .. code-block:: yaml remove_key_demo: reg.key_absent: - name: HKEY_CURRENT_USER\SOFTWARE\DeleteMe In the above example the path is interpreted as follows: - ``HKEY_CURRENT_USER`` is the hive - ``SOFTWARE\DeleteMe`` is the key ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do if not __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']: ret['comment'] = '{0} is already absent'.format(name) return ret ret['changes'] = { 'reg': { 'Removed': { 'Key': r'{0}\{1}'.format(hive, key)}}} # Check for test option if __opts__['test']: ret['result'] = None return ret # Delete the value __utils__['reg.delete_key_recursive'](hive=hive, key=key, use_32bit_registry=use_32bit_registry) if __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']: ret['result'] = False ret['changes'] = {} ret['comment'] = 'Failed to remove registry key {0}'.format(name) return ret
saltstack/salt
salt/states/reg.py
absent
python
def absent(name, vname=None, use_32bit_registry=False): r''' Ensure a registry value is removed. To remove a key use key_absent. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: .. code-block:: yaml 'HKEY_CURRENT_USER\\SOFTWARE\\Salt': reg.absent - vname: version In the above example the value named ``version`` will be removed from the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not passed, the ``(Default)`` value would be deleted. 
''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do reg_check = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) if not reg_check['success'] or reg_check['vdata'] == '(value not set)': ret['comment'] = '{0} is already absent'.format(name) return ret remove_change = {'Key': r'{0}\{1}'.format(hive, key), 'Entry': '{0}'.format(vname if vname else '(Default)')} # Check for test option if __opts__['test']: ret['result'] = None ret['changes'] = {'reg': {'Will remove': remove_change}} return ret # Delete the value ret['result'] = __utils__['reg.delete_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) if not ret['result']: ret['changes'] = {} ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive) else: ret['changes'] = {'reg': {'Removed': remove_change}} ret['comment'] = r'Removed {0} from {1}'.format(key, hive) return ret
r''' Ensure a registry value is removed. To remove a key use key_absent. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: .. code-block:: yaml 'HKEY_CURRENT_USER\\SOFTWARE\\Salt': reg.absent - vname: version In the above example the value named ``version`` will be removed from the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not passed, the ``(Default)`` value would be deleted.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/reg.py#L461-L539
[ "def _parse_key(key):\n '''\n split the hive from the key\n '''\n splt = key.split(\"\\\\\")\n hive = splt.pop(0)\n key = '\\\\'.join(splt)\n return hive, key\n" ]
# -*- coding: utf-8 -*- r''' Manage the Windows registry =========================== Many python developers think of registry keys as if they were python keys in a dictionary which is not the case. The windows registry is broken down into the following components: Hives ----- This is the top level of the registry. They all begin with HKEY. - HKEY_CLASSES_ROOT (HKCR) - HKEY_CURRENT_USER(HKCU) - HKEY_LOCAL MACHINE (HKLM) - HKEY_USER (HKU) - HKEY_CURRENT_CONFIG Keys ---- Hives contain keys. These are basically the folders beneath the hives. They can contain any number of subkeys. When passing the hive\key values they must be quoted correctly depending on the backslashes being used (``\`` vs ``\\``). The way backslashes are handled in the state file is different from the way they are handled when working on the CLI. The following are valid methods of passing the hive\key: Using single backslashes: HKLM\SOFTWARE\Python 'HKLM\SOFTWARE\Python' Using double backslashes: "HKLM\\SOFTWARE\\Python" Values or Entries ----------------- Values or Entries are the name/data pairs beneath the keys and subkeys. All keys have a default name/data pair. The name is ``(Default)`` with a displayed value of ``(value not set)``. The actual value is Null. Example ------- The following example is taken from the windows startup portion of the registry: .. 
code-block:: text [HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run] "RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s" "NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\"" "BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp" In this example these are the values for each: Hive: ``HKEY_LOCAL_MACHINE`` Key and subkeys: ``SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run`` Value: - There are 3 value names: ``RTHDVCPL``, ``NvBackend``, and ``BTMTrayAgent`` - Each value name has a corresponding value ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.stringutils log = logging.getLogger(__name__) def __virtual__(): ''' Load this state if the reg module exists ''' if 'reg.read_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.read_value') if 'reg.set_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.set_value') if 'reg.delete_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.delete_value') if 'reg.delete_key_recursive' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.delete_key_recursive') return 'reg' def _parse_key(key): ''' split the hive from the key ''' splt = key.split("\\") hive = splt.pop(0) key = '\\'.join(splt) return hive, key def present(name, vname=None, vdata=None, vtype='REG_SZ', use_32bit_registry=False, win_owner=None, win_perms=None, win_deny_perms=None, win_inheritance=True, win_perms_reset=False): r''' Ensure a registry key or value is present. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. 
For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value vdata (str, int, list, bytes): The value you'd like to set. If a value name (``vname``) is passed, this will be the data for that value name. If not, this will be the ``(Default)`` value for the key. The type of data this parameter expects is determined by the value type specified in ``vtype``. The correspondence is as follows: - REG_BINARY: Binary data (str in Py2, bytes in Py3) - REG_DWORD: int - REG_EXPAND_SZ: str - REG_MULTI_SZ: list of str - REG_QWORD: int - REG_SZ: str .. note:: When setting REG_BINARY, string data will be converted to binary automatically. To pass binary data, use the built-in yaml tag ``!!binary`` to denote the actual binary characters. For example, the following lines will both set the same data in the registry: - ``vdata: Salty Test`` - ``vdata: !!binary U2FsdHkgVGVzdA==\n`` For more information about the ``!!binary`` tag see `here <http://yaml.org/type/binary.html>`_ .. note:: The type for the ``(Default)`` value is always REG_SZ and cannot be changed. This parameter is optional. If not passed, the Key will be created with no associated item/value pairs. vtype (str): The value type for the data you wish to store in the registry. Valid values are: - REG_BINARY - REG_DWORD - REG_EXPAND_SZ - REG_MULTI_SZ - REG_QWORD - REG_SZ (Default) use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. win_owner (str): The owner of the registry key. If this is not passed, the account under which Salt is running will be used. .. note:: Owner is set for the key that contains the value/data pair. 
You cannot set ownership on value/data pairs themselves. .. versionadded:: 2019.2.0 win_perms (dict): A dictionary containing permissions to grant and their propagation. If not passed the 'Grant` permissions will not be modified. .. note:: Permissions are set for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. For each user specify the account name, with a sub dict for the permissions to grant and the 'Applies to' setting. For example: ``{'Administrators': {'perms': 'full_control', 'applies_to': 'this_key_subkeys'}}``. ``perms`` must be specified. Registry permissions are specified using the ``perms`` key. You can specify a single basic permission or a list of advanced perms. The following are valid perms: Basic (passed as a string): - full_control - read - write Advanced (passed as a list): - delete - query_value - set_value - create_subkey - enum_subkeys - notify - create_link - read_control - write_dac - write_owner The 'Applies to' setting is optional. It is specified using the ``applies_to`` key. If not specified ``this_key_subkeys`` is used. Valid options are: Applies to settings: - this_key_only - this_key_subkeys - subkeys_only .. versionadded:: 2019.2.0 win_deny_perms (dict): A dictionary containing permissions to deny and their propagation. If not passed the `Deny` permissions will not be modified. .. note:: Permissions are set for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. Valid options are the same as those specified in ``win_perms`` .. note:: 'Deny' permissions always take precedence over 'grant' permissions. .. versionadded:: 2019.2.0 win_inheritance (bool): ``True`` to inherit permissions from the parent key. ``False`` to disable inheritance. Default is ``True``. .. note:: Inheritance is set for the key that contains the value/data pair. You cannot set inheritance on value/data pairs themselves. .. 
versionadded:: 2019.2.0 win_perms_reset (bool): If ``True`` the existing DACL will be cleared and replaced with the settings defined in this function. If ``False``, new entries will be appended to the existing DACL. Default is ``False`` .. note:: Perms are reset for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. .. versionadded:: 2019.2.0 Returns: dict: A dictionary showing the results of the registry operation. Example: The following example will set the ``(Default)`` value for the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``: .. code-block:: yaml HKEY_CURRENT_USER\\SOFTWARE\\Salt: reg.present: - vdata: 2016.3.1 Example: The following example will set the value for the ``version`` entry under the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The value will be reflected in ``Wow6432Node``: .. code-block:: yaml HKEY_CURRENT_USER\\SOFTWARE\\Salt: reg.present: - vname: version - vdata: 2016.3.1 In the above example the path is interpreted as follows: - ``HKEY_CURRENT_USER`` is the hive - ``SOFTWARE\\Salt`` is the key - ``vname`` is the value name ('version') that will be created under the key - ``vdata`` is the data that will be assigned to 'version' Example: Binary data can be set in two ways. The following two examples will set a binary value of ``Salty Test`` .. code-block:: yaml no_conversion: reg.present: - name: HKLM\SOFTWARE\SaltTesting - vname: test_reg_binary_state - vdata: Salty Test - vtype: REG_BINARY conversion: reg.present: - name: HKLM\SOFTWARE\SaltTesting - vname: test_reg_binary_state_with_tag - vdata: !!binary U2FsdHkgVGVzdA==\n - vtype: REG_BINARY Example: To set a ``REG_MULTI_SZ`` value: .. code-block:: yaml reg_multi_sz: reg.present: - name: HKLM\SOFTWARE\Salt - vname: reg_multi_sz - vdata: - list item 1 - list item 2 Example: To ensure a key is present and has permissions: .. 
code-block:: yaml set_key_permissions: reg.present: - name: HKLM\SOFTWARE\Salt - vname: version - vdata: 2016.3.1 - win_owner: Administrators - win_perms: jsnuffy: perms: full_control sjones: perms: - read_control - enum_subkeys - query_value applies_to: - this_key_only - win_deny_perms: bsimpson: perms: full_control applies_to: this_key_subkeys - win_inheritance: True - win_perms_reset: True ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do reg_current = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) # Check if the key already exists # If so, check perms # We check `vdata` and `success` because `vdata` can be None if vdata == reg_current['vdata'] and reg_current['success']: ret['comment'] = '{0} in {1} is already present' \ ''.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)', salt.utils.stringutils.to_unicode(name, 'utf-8')) return __utils__['dacl.check_perms']( obj_name='\\'.join([hive, key]), obj_type='registry32' if use_32bit_registry else 'registry', ret=ret, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance, reset=win_perms_reset) # Cast the vdata according to the vtype vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype) add_change = {'Key': r'{0}\{1}'.format(hive, key), 'Entry': '{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'), 'Value': vdata_decoded, 'Owner': win_owner, 'Perms': {'Grant': win_perms, 'Deny': win_deny_perms}, 'Inheritance': win_inheritance} # Check for test option if __opts__['test']: ret['result'] = None ret['changes'] = {'reg': {'Will add': add_change}} return ret # Configure the value ret['result'] = __utils__['reg.set_value'](hive=hive, key=key, vname=vname, vdata=vdata, vtype=vtype, use_32bit_registry=use_32bit_registry) if not ret['result']: ret['changes'] = {} ret['comment'] = 
r'Failed to add {0} to {1}\{2}'.format(name, hive, key) else: ret['changes'] = {'reg': {'Added': add_change}} ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key) if ret['result']: ret = __utils__['dacl.check_perms']( obj_name='\\'.join([hive, key]), obj_type='registry32' if use_32bit_registry else 'registry', ret=ret, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance, reset=win_perms_reset) return ret def key_absent(name, use_32bit_registry=False): r''' .. versionadded:: 2015.5.4 Ensure a registry key is removed. This will remove the key, subkeys, and all value entries. Args: name (str): A string representing the full path to the key to be removed to include the hive and the keypath. The hive can be any of the following: - HKEY_LOCAL_MACHINE or HKLM - HKEY_CURRENT_USER or HKCU - HKEY_USER or HKU use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: The following example will delete the ``SOFTWARE\DeleteMe`` key in the ``HKEY_LOCAL_MACHINE`` hive including all its subkeys and value pairs. .. 
code-block:: yaml remove_key_demo: reg.key_absent: - name: HKEY_CURRENT_USER\SOFTWARE\DeleteMe In the above example the path is interpreted as follows: - ``HKEY_CURRENT_USER`` is the hive - ``SOFTWARE\DeleteMe`` is the key ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do if not __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']: ret['comment'] = '{0} is already absent'.format(name) return ret ret['changes'] = { 'reg': { 'Removed': { 'Key': r'{0}\{1}'.format(hive, key)}}} # Check for test option if __opts__['test']: ret['result'] = None return ret # Delete the value __utils__['reg.delete_key_recursive'](hive=hive, key=key, use_32bit_registry=use_32bit_registry) if __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']: ret['result'] = False ret['changes'] = {} ret['comment'] = 'Failed to remove registry key {0}'.format(name) return ret
saltstack/salt
salt/states/reg.py
key_absent
python
def key_absent(name, use_32bit_registry=False): r''' .. versionadded:: 2015.5.4 Ensure a registry key is removed. This will remove the key, subkeys, and all value entries. Args: name (str): A string representing the full path to the key to be removed to include the hive and the keypath. The hive can be any of the following: - HKEY_LOCAL_MACHINE or HKLM - HKEY_CURRENT_USER or HKCU - HKEY_USER or HKU use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: The following example will delete the ``SOFTWARE\DeleteMe`` key in the ``HKEY_LOCAL_MACHINE`` hive including all its subkeys and value pairs. .. code-block:: yaml remove_key_demo: reg.key_absent: - name: HKEY_CURRENT_USER\SOFTWARE\DeleteMe In the above example the path is interpreted as follows: - ``HKEY_CURRENT_USER`` is the hive - ``SOFTWARE\DeleteMe`` is the key ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do if not __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']: ret['comment'] = '{0} is already absent'.format(name) return ret ret['changes'] = { 'reg': { 'Removed': { 'Key': r'{0}\{1}'.format(hive, key)}}} # Check for test option if __opts__['test']: ret['result'] = None return ret # Delete the value __utils__['reg.delete_key_recursive'](hive=hive, key=key, use_32bit_registry=use_32bit_registry) if __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']: ret['result'] = False ret['changes'] = {} ret['comment'] = 'Failed to remove registry key {0}'.format(name) return ret
r''' .. versionadded:: 2015.5.4 Ensure a registry key is removed. This will remove the key, subkeys, and all value entries. Args: name (str): A string representing the full path to the key to be removed to include the hive and the keypath. The hive can be any of the following: - HKEY_LOCAL_MACHINE or HKLM - HKEY_CURRENT_USER or HKCU - HKEY_USER or HKU use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: The following example will delete the ``SOFTWARE\DeleteMe`` key in the ``HKEY_LOCAL_MACHINE`` hive including all its subkeys and value pairs. .. code-block:: yaml remove_key_demo: reg.key_absent: - name: HKEY_CURRENT_USER\SOFTWARE\DeleteMe In the above example the path is interpreted as follows: - ``HKEY_CURRENT_USER`` is the hive - ``SOFTWARE\DeleteMe`` is the key
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/reg.py#L542-L619
[ "def _parse_key(key):\n '''\n split the hive from the key\n '''\n splt = key.split(\"\\\\\")\n hive = splt.pop(0)\n key = '\\\\'.join(splt)\n return hive, key\n" ]
# -*- coding: utf-8 -*- r''' Manage the Windows registry =========================== Many python developers think of registry keys as if they were python keys in a dictionary which is not the case. The windows registry is broken down into the following components: Hives ----- This is the top level of the registry. They all begin with HKEY. - HKEY_CLASSES_ROOT (HKCR) - HKEY_CURRENT_USER(HKCU) - HKEY_LOCAL MACHINE (HKLM) - HKEY_USER (HKU) - HKEY_CURRENT_CONFIG Keys ---- Hives contain keys. These are basically the folders beneath the hives. They can contain any number of subkeys. When passing the hive\key values they must be quoted correctly depending on the backslashes being used (``\`` vs ``\\``). The way backslashes are handled in the state file is different from the way they are handled when working on the CLI. The following are valid methods of passing the hive\key: Using single backslashes: HKLM\SOFTWARE\Python 'HKLM\SOFTWARE\Python' Using double backslashes: "HKLM\\SOFTWARE\\Python" Values or Entries ----------------- Values or Entries are the name/data pairs beneath the keys and subkeys. All keys have a default name/data pair. The name is ``(Default)`` with a displayed value of ``(value not set)``. The actual value is Null. Example ------- The following example is taken from the windows startup portion of the registry: .. 
code-block:: text [HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run] "RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s" "NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\"" "BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp" In this example these are the values for each: Hive: ``HKEY_LOCAL_MACHINE`` Key and subkeys: ``SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run`` Value: - There are 3 value names: ``RTHDVCPL``, ``NvBackend``, and ``BTMTrayAgent`` - Each value name has a corresponding value ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.stringutils log = logging.getLogger(__name__) def __virtual__(): ''' Load this state if the reg module exists ''' if 'reg.read_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.read_value') if 'reg.set_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.set_value') if 'reg.delete_value' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.delete_value') if 'reg.delete_key_recursive' not in __utils__: return (False, 'reg state module failed to load: ' 'missing util function: reg.delete_key_recursive') return 'reg' def _parse_key(key): ''' split the hive from the key ''' splt = key.split("\\") hive = splt.pop(0) key = '\\'.join(splt) return hive, key def present(name, vname=None, vdata=None, vtype='REG_SZ', use_32bit_registry=False, win_owner=None, win_perms=None, win_deny_perms=None, win_inheritance=True, win_perms_reset=False): r''' Ensure a registry key or value is present. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. 
For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value vdata (str, int, list, bytes): The value you'd like to set. If a value name (``vname``) is passed, this will be the data for that value name. If not, this will be the ``(Default)`` value for the key. The type of data this parameter expects is determined by the value type specified in ``vtype``. The correspondence is as follows: - REG_BINARY: Binary data (str in Py2, bytes in Py3) - REG_DWORD: int - REG_EXPAND_SZ: str - REG_MULTI_SZ: list of str - REG_QWORD: int - REG_SZ: str .. note:: When setting REG_BINARY, string data will be converted to binary automatically. To pass binary data, use the built-in yaml tag ``!!binary`` to denote the actual binary characters. For example, the following lines will both set the same data in the registry: - ``vdata: Salty Test`` - ``vdata: !!binary U2FsdHkgVGVzdA==\n`` For more information about the ``!!binary`` tag see `here <http://yaml.org/type/binary.html>`_ .. note:: The type for the ``(Default)`` value is always REG_SZ and cannot be changed. This parameter is optional. If not passed, the Key will be created with no associated item/value pairs. vtype (str): The value type for the data you wish to store in the registry. Valid values are: - REG_BINARY - REG_DWORD - REG_EXPAND_SZ - REG_MULTI_SZ - REG_QWORD - REG_SZ (Default) use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. win_owner (str): The owner of the registry key. If this is not passed, the account under which Salt is running will be used. .. note:: Owner is set for the key that contains the value/data pair. 
You cannot set ownership on value/data pairs themselves. .. versionadded:: 2019.2.0 win_perms (dict): A dictionary containing permissions to grant and their propagation. If not passed the 'Grant` permissions will not be modified. .. note:: Permissions are set for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. For each user specify the account name, with a sub dict for the permissions to grant and the 'Applies to' setting. For example: ``{'Administrators': {'perms': 'full_control', 'applies_to': 'this_key_subkeys'}}``. ``perms`` must be specified. Registry permissions are specified using the ``perms`` key. You can specify a single basic permission or a list of advanced perms. The following are valid perms: Basic (passed as a string): - full_control - read - write Advanced (passed as a list): - delete - query_value - set_value - create_subkey - enum_subkeys - notify - create_link - read_control - write_dac - write_owner The 'Applies to' setting is optional. It is specified using the ``applies_to`` key. If not specified ``this_key_subkeys`` is used. Valid options are: Applies to settings: - this_key_only - this_key_subkeys - subkeys_only .. versionadded:: 2019.2.0 win_deny_perms (dict): A dictionary containing permissions to deny and their propagation. If not passed the `Deny` permissions will not be modified. .. note:: Permissions are set for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. Valid options are the same as those specified in ``win_perms`` .. note:: 'Deny' permissions always take precedence over 'grant' permissions. .. versionadded:: 2019.2.0 win_inheritance (bool): ``True`` to inherit permissions from the parent key. ``False`` to disable inheritance. Default is ``True``. .. note:: Inheritance is set for the key that contains the value/data pair. You cannot set inheritance on value/data pairs themselves. .. 
versionadded:: 2019.2.0 win_perms_reset (bool): If ``True`` the existing DACL will be cleared and replaced with the settings defined in this function. If ``False``, new entries will be appended to the existing DACL. Default is ``False`` .. note:: Perms are reset for the key that contains the value/data pair. You cannot set permissions on value/data pairs themselves. .. versionadded:: 2019.2.0 Returns: dict: A dictionary showing the results of the registry operation. Example: The following example will set the ``(Default)`` value for the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``: .. code-block:: yaml HKEY_CURRENT_USER\\SOFTWARE\\Salt: reg.present: - vdata: 2016.3.1 Example: The following example will set the value for the ``version`` entry under the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The value will be reflected in ``Wow6432Node``: .. code-block:: yaml HKEY_CURRENT_USER\\SOFTWARE\\Salt: reg.present: - vname: version - vdata: 2016.3.1 In the above example the path is interpreted as follows: - ``HKEY_CURRENT_USER`` is the hive - ``SOFTWARE\\Salt`` is the key - ``vname`` is the value name ('version') that will be created under the key - ``vdata`` is the data that will be assigned to 'version' Example: Binary data can be set in two ways. The following two examples will set a binary value of ``Salty Test`` .. code-block:: yaml no_conversion: reg.present: - name: HKLM\SOFTWARE\SaltTesting - vname: test_reg_binary_state - vdata: Salty Test - vtype: REG_BINARY conversion: reg.present: - name: HKLM\SOFTWARE\SaltTesting - vname: test_reg_binary_state_with_tag - vdata: !!binary U2FsdHkgVGVzdA==\n - vtype: REG_BINARY Example: To set a ``REG_MULTI_SZ`` value: .. code-block:: yaml reg_multi_sz: reg.present: - name: HKLM\SOFTWARE\Salt - vname: reg_multi_sz - vdata: - list item 1 - list item 2 Example: To ensure a key is present and has permissions: .. 
code-block:: yaml set_key_permissions: reg.present: - name: HKLM\SOFTWARE\Salt - vname: version - vdata: 2016.3.1 - win_owner: Administrators - win_perms: jsnuffy: perms: full_control sjones: perms: - read_control - enum_subkeys - query_value applies_to: - this_key_only - win_deny_perms: bsimpson: perms: full_control applies_to: this_key_subkeys - win_inheritance: True - win_perms_reset: True ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do reg_current = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) # Check if the key already exists # If so, check perms # We check `vdata` and `success` because `vdata` can be None if vdata == reg_current['vdata'] and reg_current['success']: ret['comment'] = '{0} in {1} is already present' \ ''.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)', salt.utils.stringutils.to_unicode(name, 'utf-8')) return __utils__['dacl.check_perms']( obj_name='\\'.join([hive, key]), obj_type='registry32' if use_32bit_registry else 'registry', ret=ret, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance, reset=win_perms_reset) # Cast the vdata according to the vtype vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype) add_change = {'Key': r'{0}\{1}'.format(hive, key), 'Entry': '{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'), 'Value': vdata_decoded, 'Owner': win_owner, 'Perms': {'Grant': win_perms, 'Deny': win_deny_perms}, 'Inheritance': win_inheritance} # Check for test option if __opts__['test']: ret['result'] = None ret['changes'] = {'reg': {'Will add': add_change}} return ret # Configure the value ret['result'] = __utils__['reg.set_value'](hive=hive, key=key, vname=vname, vdata=vdata, vtype=vtype, use_32bit_registry=use_32bit_registry) if not ret['result']: ret['changes'] = {} ret['comment'] = 
r'Failed to add {0} to {1}\{2}'.format(name, hive, key) else: ret['changes'] = {'reg': {'Added': add_change}} ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key) if ret['result']: ret = __utils__['dacl.check_perms']( obj_name='\\'.join([hive, key]), obj_type='registry32' if use_32bit_registry else 'registry', ret=ret, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance, reset=win_perms_reset) return ret def absent(name, vname=None, use_32bit_registry=False): r''' Ensure a registry value is removed. To remove a key use key_absent. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: .. code-block:: yaml 'HKEY_CURRENT_USER\\SOFTWARE\\Salt': reg.absent - vname: version In the above example the value named ``version`` will be removed from the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not passed, the ``(Default)`` value would be deleted. 
''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do reg_check = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) if not reg_check['success'] or reg_check['vdata'] == '(value not set)': ret['comment'] = '{0} is already absent'.format(name) return ret remove_change = {'Key': r'{0}\{1}'.format(hive, key), 'Entry': '{0}'.format(vname if vname else '(Default)')} # Check for test option if __opts__['test']: ret['result'] = None ret['changes'] = {'reg': {'Will remove': remove_change}} return ret # Delete the value ret['result'] = __utils__['reg.delete_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) if not ret['result']: ret['changes'] = {} ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive) else: ret['changes'] = {'reg': {'Removed': remove_change}} ret['comment'] = r'Removed {0} from {1}'.format(key, hive) return ret
saltstack/salt
salt/utils/thin.py
_get_salt_call
python
def _get_salt_call(*dirs, **namespaces):
    '''
    Build the source of the ``salt-call`` bootstrap script as bytes.

    The generated script extends ``sys.path`` with the supplied directories
    (plus a ``py<major>`` entry for the running interpreter) and, when
    alternative Salt versions are configured on the master, picks the
    namespace whose locked (major, minor) version matches the interpreter
    before handing control to ``salt.scripts.salt_call``.

    :param dirs: directories to include in the generated system path
    :param namespaces: mapping of namespace name to locked Python
                       major/minor version (e.g. ``{'2016.11.4': [2, 6]}``)
    :return: bootstrap script source, encoded as bytes
    '''
    template = '''# -*- coding: utf-8 -*-
import os
import sys

# Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]}
# Appears only when configured in Master configuration.
namespaces = %namespaces%

# Default system paths alongside the namespaces
syspaths = %dirs%
syspaths.append('py{0}'.format(sys.version_info[0]))

curr_ver = (sys.version_info[0], sys.version_info[1],)

namespace = ''
for ns in namespaces:
    if curr_ver == tuple(namespaces[ns]):
        namespace = ns
        break

for base in syspaths:
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base))

if __name__ == '__main__':
    from salt.scripts import salt_call
    salt_call()
'''
    # Substitute each placeholder with its JSON-serialized value; the two
    # placeholders are distinct, so replacement order does not matter.
    substitutions = {'%dirs%': dirs, '%namespaces%': namespaces}
    for placeholder, value in substitutions.items():
        template = template.replace(placeholder, salt.utils.json.dumps(value))
    return salt.utils.stringutils.to_bytes(template)
Return salt-call source, based on configuration. This will include additional namespaces for other versions of Salt, if needed (e.g. older interpreters). :dirs: List of directories to include in the system path :namespaces: Dictionary of namespaces :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L89-L131
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n" ]
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. 
:param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. :param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. :return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative 
version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. :param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. 
This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. :return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' 
tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt 
was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing 
alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. 
mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 
) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 
'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 
'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar def min_sum(cachedir, form='sha1'): ''' Return the 
checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
_add_dependency
python
def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py'))
Add a dependency to the top list. :param obj: :param is_file: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L155-L166
null
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. :param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. 
:return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. 
:param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. 
:return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' 
tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt 
was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing 
alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. 
mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 
) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 
'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 
'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar def min_sum(cachedir, form='sha1'): ''' Return the 
checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
gte
python
def gte():
    '''
    Entry point invoked externally by an alternative Python interpreter
    (from within the _get_tops machinery).

    Reads a JSON-encoded keyword mapping from ``sys.argv[1]``, resolves
    the top-level dependency directories via :func:`get_tops`, and
    returns them serialized as JSON.

    :param extra_mods:
    :param so_mods:
    :return: JSON string with the collected top directories
    '''
    kwargs = salt.utils.json.loads(sys.argv[1])
    return salt.utils.json.dumps(get_tops(**kwargs), ensure_ascii=False)
This function is called externally from the alternative Python interpreter from within _get_tops function. :param extra_mods: :param so_mods: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L169-L181
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n", "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n", "def get_tops(extra_mods='', so_mods=''):\n '''\n Get top directories for the dependencies, based on Python interpreter.\n\n :param extra_mods:\n :param so_mods:\n :return:\n '''\n tops = []\n for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent,\n singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]:\n if mod:\n log.debug('Adding module to the 
tops: \"%s\"', mod.__name__)\n _add_dependency(tops, mod)\n\n for mod in [m for m in extra_mods.split(',') if m]:\n if mod not in locals() and mod not in globals():\n try:\n locals()[mod] = __import__(mod)\n moddir, modname = os.path.split(locals()[mod].__file__)\n base, _ = os.path.splitext(modname)\n if base == '__init__':\n tops.append(moddir)\n else:\n tops.append(os.path.join(moddir, base + '.py'))\n except ImportError as err:\n log.exception(err)\n log.error('Unable to import extra-module \"%s\"', mod)\n\n for mod in [m for m in so_mods.split(',') if m]:\n try:\n locals()[mod] = __import__(mod)\n tops.append(locals()[mod].__file__)\n except ImportError as err:\n log.exception(err)\n log.error('Unable to import so-module \"%s\"', mod)\n\n return tops\n" ]
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. 
:return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. 
:param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. 
:return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' 
tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt 
was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing 
alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. 
mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 
) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 
'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 
'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar def min_sum(cachedir, form='sha1'): ''' Return the 
checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
get_ext_tops
python
def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. :return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives
Get top directories for the dependencies, based on external configuration. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L184-L233
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. 
:param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. :param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. 
This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. :return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' 
tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt 
was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing 
alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. 
mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 
) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 
'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 
'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar def min_sum(cachedir, form='sha1'): ''' Return the 
checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
_get_ext_namespaces
python
def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. :param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces
Get namespaces from the existing configuration. :param config: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L236-L255
null
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. 
:param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. :return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. 
:param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. :return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." 
+ os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' 
tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt 
was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing 
alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. 
mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 
) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 
'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 
'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar def min_sum(cachedir, form='sha1'): ''' Return the 
checksum of the current min tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
get_tops
python
def get_tops(extra_mods='', so_mods=''):
    '''
    Get top directories for the dependencies, based on Python interpreter.

    :param extra_mods: comma-delimited names of extra pure-Python modules
                       to include alongside the hard-wired dependencies
    :param so_mods: comma-delimited names of binary (shared-object) modules
                    to include by their ``__file__`` path
    :return: list of top-level package directories / single-file module paths
    '''
    tops = []
    # Hard-wired dependencies. Entries may be None/falsy when the optional
    # import at module load time failed, so guard each one.
    for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch,
                concurrent, singledispatch_helpers, ssl_match_hostname,
                markupsafe, backports_abc]:
        if mod:
            log.debug('Adding module to the tops: "%s"', mod.__name__)
            _add_dependency(tops, mod)

    # Track what we imported in an explicit dict instead of writing into
    # locals(): mutating the mapping returned by locals() inside a function
    # is undefined behavior per the Python language reference and only
    # happens to work in CPython.
    imported = {}
    for mod in [m for m in extra_mods.split(',') if m]:
        # Skip names already bound at module level (covered by the
        # hard-wired loop above) or already handled in this loop.
        if mod in imported or mod in globals():
            continue
        try:
            imported[mod] = __import__(mod)
            moddir, modname = os.path.split(imported[mod].__file__)
            base, _ = os.path.splitext(modname)
            if base == '__init__':
                # Package: pack the whole directory
                tops.append(moddir)
            else:
                # Single-file module: pack the .py source
                tops.append(os.path.join(moddir, base + '.py'))
        except ImportError as err:
            log.exception(err)
            log.error('Unable to import extra-module "%s"', mod)

    for mod in [m for m in so_mods.split(',') if m]:
        try:
            # Binary modules are packed by their compiled file path
            tops.append(__import__(mod).__file__)
        except ImportError as err:
            log.exception(err)
            log.error('Unable to import so-module "%s"', mod)

    return tops
Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L258-L295
[ "def _add_dependency(container, obj):\n '''\n Add a dependency to the top list.\n\n :param obj:\n :param is_file:\n :return:\n '''\n if os.path.basename(obj.__file__).split('.')[0] == '__init__':\n container.append(os.path.dirname(obj.__file__))\n else:\n container.append(obj.__file__.replace('.pyc', '.py'))\n" ]
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. 
:param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. :return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. 
:param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. :return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. 
code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' + (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. 
See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = 
subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname 
= os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', 
'.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 
2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' ) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. 
if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 
'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 
'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar def min_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
_get_supported_py_config
python
def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. :return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap))
Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L298-L320
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. 
:param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. :return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. 
:param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." 
+ os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' 
tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt 
was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing 
alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. 
mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 
) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 
'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 
'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar def min_sum(cachedir, form='sha1'): ''' Return the 
checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
_get_thintar_prefix
python
def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname
Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L323-L334
null
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. 
:param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. :return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. 
:param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. 
:return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' 
tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt 
was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing 
alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. 
mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 
) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 
'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 
'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar def min_sum(cachedir, form='sha1'): ''' Return the 
checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
gen_thin
python
def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' + (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) 
if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, 
salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not 
name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: 
fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar
Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L337-L563
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def _get_salt_call(*dirs, **namespaces):\n '''\n Return salt-call source, based on configuration.\n This will include additional namespaces for another versions of Salt,\n if needed (e.g. older interpreters etc).\n\n :dirs: List of directories to include in the system path\n :namespaces: Dictionary of namespace\n :return:\n '''\n template = '''# -*- coding: utf-8 -*-\nimport os\nimport sys\n\n# Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]}\n# Appears only when configured in Master configuration.\nnamespaces = %namespaces%\n\n# Default system paths alongside the namespaces\nsyspaths = %dirs%\nsyspaths.append('py{0}'.format(sys.version_info[0]))\n\ncurr_ver = (sys.version_info[0], sys.version_info[1],)\n\nnamespace = ''\nfor ns in namespaces:\n if curr_ver == tuple(namespaces[ns]):\n namespace = ns\n break\n\nfor base in syspaths:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__),\n namespace and os.path.join(namespace, base) or base))\n\nif __name__ == '__main__':\n from salt.scripts import salt_call\n salt_call()\n'''\n\n for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]:\n template = template.replace(tgt, salt.utils.json.dumps(cnt))\n\n return salt.utils.stringutils.to_bytes(template)\n", "def _get_ext_namespaces(config):\n '''\n Get namespaces from the existing configuration.\n\n :param config:\n :return:\n '''\n namespaces = {}\n if not config:\n return namespaces\n\n for ns in config:\n constraint_version = tuple(config[ns].get('py-version', []))\n if not constraint_version:\n raise salt.exceptions.SaltSystemExit(\"An alternative version is configured, but not defined \"\n \"to what Python's 
major/minor version it should be constrained.\")\n else:\n namespaces[ns] = constraint_version\n\n return namespaces\n" ]
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. 
:param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. :return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. 
:param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. 
:return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. 
code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 
) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 
'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 
'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar def min_sum(cachedir, form='sha1'): ''' Return the 
checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
thin_sum
python
def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form)
Return the checksum of the current thin tarball
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L566-L578
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='',\n python2_bin='python2', python3_bin='python3', absonly=True,\n compress='gzip', extended_cfg=None):\n '''\n Generate the salt-thin tarball and print the location of the tarball\n Optional additional mods to include (e.g. mako) can be supplied as a comma\n delimited string. Permits forcing an overwrite of the output file as well.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-run thin.generate\n salt-run thin.generate mako\n salt-run thin.generate mako,wempy 1\n salt-run thin.generate overwrite=1\n '''\n if sys.version_info < (2, 6):\n raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is \"2.6\".')\n if compress not in ['gzip', 'zip']:\n log.warning('Unknown compression type: \"%s\". Falling back to \"gzip\" compression.', compress)\n compress = 'gzip'\n\n thindir = os.path.join(cachedir, 'thin')\n if not os.path.isdir(thindir):\n os.makedirs(thindir)\n thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip'))\n thinver = os.path.join(thindir, 'version')\n pythinver = os.path.join(thindir, '.thin-gen-py-version')\n salt_call = os.path.join(thindir, 'salt-call')\n pymap_cfg = os.path.join(thindir, 'supported-versions')\n code_checksum = os.path.join(thindir, 'code-checksum')\n digest_collector = salt.utils.hashutils.DigestCollector()\n\n with salt.utils.files.fopen(salt_call, 'wb') as fp_:\n fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg)))\n\n if os.path.isfile(thintar):\n if not overwrite:\n if os.path.isfile(thinver):\n with salt.utils.files.fopen(thinver) as fh_:\n overwrite = fh_.read() != salt.version.__version__\n if overwrite is False and os.path.isfile(pythinver):\n with salt.utils.files.fopen(pythinver) as fh_:\n overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function\n else:\n overwrite = True\n\n if overwrite:\n try:\n log.debug('Removing %s archive file', thintar)\n os.remove(thintar)\n except OSError as exc:\n log.error('Error while removing %s file: %s', thintar, exc)\n if os.path.exists(thintar):\n raise salt.exceptions.SaltSystemExit(\n 'Unable to remove {0}. See logs for details.'.format(thintar)\n )\n else:\n return thintar\n if _six.PY3:\n # Let's check for the minimum python 2 version requirement, 2.6\n py_shell_cmd = \"{} -c 'import sys;sys.stdout.write(\\\"%s.%s\\\\n\\\" % sys.version_info[:2]);'\".format(python2_bin)\n cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True)\n stdout, _ = cmd.communicate()\n if cmd.returncode == 0:\n py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.'))\n if py2_version < (2, 6):\n raise salt.exceptions.SaltSystemExit(\n 'The minimum required python version to run salt-ssh is \"2.6\".'\n 'The version reported by \"{0}\" is \"{1}\". 
Please try \"salt-ssh '\n '--python2-bin=<path-to-python-2.6-binary-or-higher>\".'.format(python2_bin, stdout.strip()))\n else:\n log.error('Unable to detect Python-2 version')\n log.debug(stdout)\n\n tops_failure_msg = 'Failed %s tops for Python binary %s.'\n tops_py_version_mapping = {}\n tops = get_tops(extra_mods=extra_mods, so_mods=so_mods)\n tops_py_version_mapping[sys.version_info.major] = tops\n\n # Collect tops, alternative to 2.x version\n if _six.PY2 and sys.version_info.major == 2:\n # Get python 3 tops\n py_shell_cmd = \"{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'\".format(\n python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))\n cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n stdout, stderr = cmd.communicate()\n if cmd.returncode == 0:\n try:\n tops = salt.utils.json.loads(stdout)\n tops_py_version_mapping['3'] = tops\n except ValueError as err:\n log.error(tops_failure_msg, 'parsing', python3_bin)\n log.exception(err)\n else:\n log.error(tops_failure_msg, 'collecting', python3_bin)\n log.debug(stderr)\n\n # Collect tops, alternative to 3.x version\n if _six.PY3 and sys.version_info.major == 3:\n # Get python 2 tops\n py_shell_cmd = \"{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'\".format(\n python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))\n cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n stdout, stderr = cmd.communicate()\n if cmd.returncode == 0:\n try:\n tops = salt.utils.json.loads(stdout.decode('utf-8'))\n tops_py_version_mapping['2'] = tops\n except ValueError as err:\n log.error(tops_failure_msg, 'parsing', python2_bin)\n log.exception(err)\n else:\n log.error(tops_failure_msg, 'collecting', python2_bin)\n log.debug(stderr)\n\n with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_:\n fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, 
extended_cfg=extended_cfg))\n\n tmp_thintar = _get_thintar_prefix(thintar)\n if compress == 'gzip':\n tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True)\n elif compress == 'zip':\n tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED)\n tfp.add = tfp.write\n\n try: # cwd may not exist if it was removed but salt was run from it\n start_dir = os.getcwd()\n except OSError:\n start_dir = None\n tempdir = None\n\n # Pack default data\n log.debug('Packing default libraries based on current Salt version')\n for py_ver, tops in _six.iteritems(tops_py_version_mapping):\n for top in tops:\n if absonly and not os.path.isabs(top):\n continue\n base = os.path.basename(top)\n top_dirname = os.path.dirname(top)\n if os.path.isdir(top_dirname):\n os.chdir(top_dirname)\n else:\n # This is likely a compressed python .egg\n tempdir = tempfile.mkdtemp()\n egg = zipfile.ZipFile(top_dirname)\n egg.extractall(tempdir)\n top = os.path.join(tempdir, base)\n os.chdir(tempdir)\n\n site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver)\n\n log.debug('Packing \"%s\" to \"%s\" destination', base, site_pkg_dir)\n if not os.path.isdir(top):\n # top is a single file module\n if os.path.exists(os.path.join(top_dirname, base)):\n tfp.add(base, arcname=os.path.join(site_pkg_dir, base))\n continue\n for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True):\n for name in files:\n if not name.endswith(('.pyc', '.pyo')):\n digest_collector.add(os.path.join(root, name))\n arcname = os.path.join(site_pkg_dir, root, name)\n if hasattr(tfp, 'getinfo'):\n try:\n # This is a little slow but there's no clear way to detect duplicates\n tfp.getinfo(os.path.join(site_pkg_dir, root, name))\n arcname = None\n except KeyError:\n log.debug('ZIP: Unable to add \"%s\" with \"getinfo\"', arcname)\n if arcname:\n tfp.add(os.path.join(root, name), arcname=arcname)\n\n if tempdir is not None:\n shutil.rmtree(tempdir)\n tempdir = 
None\n\n # Pack alternative data\n if extended_cfg:\n log.debug('Packing libraries based on alternative Salt versions')\n for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)):\n tops = [cfg.get('path')] + cfg.get('dependencies')\n py_ver_major, py_ver_minor = cfg.get('py-version')\n for top in tops:\n base, top_dirname = os.path.basename(top), os.path.dirname(top)\n os.chdir(top_dirname)\n site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major)\n log.debug('Packing alternative \"%s\" to \"%s/%s\" destination', base, ns, site_pkg_dir)\n if not os.path.isdir(top):\n # top is a single file module\n if os.path.exists(os.path.join(top_dirname, base)):\n tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base))\n continue\n for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True):\n for name in files:\n if not name.endswith(('.pyc', '.pyo')):\n digest_collector.add(os.path.join(root, name))\n arcname = os.path.join(ns, site_pkg_dir, root, name)\n if hasattr(tfp, 'getinfo'):\n try:\n tfp.getinfo(os.path.join(site_pkg_dir, root, name))\n arcname = None\n except KeyError:\n log.debug('ZIP: Unable to add \"%s\" with \"getinfo\"', arcname)\n if arcname:\n tfp.add(os.path.join(root, name), arcname=arcname)\n\n os.chdir(thindir)\n with salt.utils.files.fopen(thinver, 'w+') as fp_:\n fp_.write(salt.version.__version__)\n with salt.utils.files.fopen(pythinver, 'w+') as fp_:\n fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function\n with salt.utils.files.fopen(code_checksum, 'w+') as fp_:\n fp_.write(digest_collector.digest())\n os.chdir(os.path.dirname(thinver))\n\n for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']:\n tfp.add(fname)\n\n if start_dir:\n os.chdir(start_dir)\n tfp.close()\n\n shutil.move(tmp_thintar, thintar)\n\n return thintar\n" ]
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. 
:param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. :return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. 
:param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. 
:return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' 
tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt 
was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing 
alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. 
code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 
) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 
'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 
'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar def min_sum(cachedir, form='sha1'): ''' Return the 
checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
gen_min
python
def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". 
Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' ) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was 
run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 
'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with 
salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar
Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L581-L859
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def _get_salt_call(*dirs, **namespaces):\n '''\n Return salt-call source, based on configuration.\n This will include additional namespaces for another versions of Salt,\n if needed (e.g. older interpreters etc).\n\n :dirs: List of directories to include in the system path\n :namespaces: Dictionary of namespace\n :return:\n '''\n template = '''# -*- coding: utf-8 -*-\nimport os\nimport sys\n\n# Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]}\n# Appears only when configured in Master configuration.\nnamespaces = %namespaces%\n\n# Default system paths alongside the namespaces\nsyspaths = %dirs%\nsyspaths.append('py{0}'.format(sys.version_info[0]))\n\ncurr_ver = (sys.version_info[0], sys.version_info[1],)\n\nnamespace = ''\nfor ns in namespaces:\n if curr_ver == tuple(namespaces[ns]):\n namespace = ns\n break\n\nfor base in syspaths:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__),\n namespace and os.path.join(namespace, base) or base))\n\nif __name__ == '__main__':\n from salt.scripts import salt_call\n salt_call()\n'''\n\n for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]:\n template = template.replace(tgt, salt.utils.json.dumps(cnt))\n\n return salt.utils.stringutils.to_bytes(template)\n" ]
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. 
:param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. :return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. 
:param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. 
:return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' 
tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt 
was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing 
alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def min_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
saltstack/salt
salt/utils/thin.py
min_sum
python
def min_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' mintar = gen_min(cachedir) return salt.utils.hashutils.get_hash(mintar, form)
Return the checksum of the current thin tarball
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L862-L867
[ "def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='',\n python2_bin='python2', python3_bin='python3'):\n '''\n Generate the salt-min tarball and print the location of the tarball\n Optional additional mods to include (e.g. mako) can be supplied as a comma\n delimited string. Permits forcing an overwrite of the output file as well.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-run min.generate\n salt-run min.generate mako\n salt-run min.generate mako,wempy 1\n salt-run min.generate overwrite=1\n '''\n mindir = os.path.join(cachedir, 'min')\n if not os.path.isdir(mindir):\n os.makedirs(mindir)\n mintar = os.path.join(mindir, 'min.tgz')\n minver = os.path.join(mindir, 'version')\n pyminver = os.path.join(mindir, '.min-gen-py-version')\n salt_call = os.path.join(mindir, 'salt-call')\n with salt.utils.files.fopen(salt_call, 'wb') as fp_:\n fp_.write(_get_salt_call())\n if os.path.isfile(mintar):\n if not overwrite:\n if os.path.isfile(minver):\n with salt.utils.files.fopen(minver) as fh_:\n overwrite = fh_.read() != salt.version.__version__\n if overwrite is False and os.path.isfile(pyminver):\n with salt.utils.files.fopen(pyminver) as fh_:\n overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function\n else:\n overwrite = True\n\n if overwrite:\n try:\n os.remove(mintar)\n except OSError:\n pass\n else:\n return mintar\n if _six.PY3:\n # Let's check for the minimum python 2 version requirement, 2.6\n py_shell_cmd = (\n python2_bin + ' -c \\'from __future__ import print_function; import sys; '\n 'print(\"{0}.{1}\".format(*(sys.version_info[:2])));\\''\n )\n cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True)\n stdout, _ = cmd.communicate()\n if cmd.returncode == 0:\n py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.'))\n if py2_version < (2, 6):\n # Bail!\n raise salt.exceptions.SaltSystemExit(\n 'The minimum required python version to run salt-ssh is \"2.6\".'\n 'The 
version reported by \"{0}\" is \"{1}\". Please try \"salt-ssh '\n '--python2-bin=<path-to-python-2.6-binary-or-higher>\".'.format(python2_bin,\n stdout.strip())\n )\n elif sys.version_info < (2, 6):\n # Bail! Though, how did we reached this far in the first place.\n raise salt.exceptions.SaltSystemExit(\n 'The minimum required python version to run salt-ssh is \"2.6\".'\n )\n\n tops_py_version_mapping = {}\n tops = get_tops(extra_mods=extra_mods, so_mods=so_mods)\n if _six.PY2:\n tops_py_version_mapping['2'] = tops\n else:\n tops_py_version_mapping['3'] = tops\n\n # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory.\n # This would reduce the min size.\n if _six.PY2 and sys.version_info[0] == 2:\n # Get python 3 tops\n py_shell_cmd = (\n python3_bin + ' -c \\'import sys; import json; import salt.utils.thin; '\n 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\\' '\n '\\'{0}\\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))\n )\n cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n stdout, stderr = cmd.communicate()\n if cmd.returncode == 0:\n try:\n tops = salt.utils.json.loads(stdout)\n tops_py_version_mapping['3'] = tops\n except ValueError:\n pass\n if _six.PY3 and sys.version_info[0] == 3:\n # Get python 2 tops\n py_shell_cmd = (\n python2_bin + ' -c \\'from __future__ import print_function; '\n 'import sys; import json; import salt.utils.thin; '\n 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\\' '\n '\\'{0}\\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))\n )\n cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n stdout, stderr = cmd.communicate()\n if cmd.returncode == 0:\n try:\n tops = salt.utils.json.loads(stdout.decode('utf-8'))\n 
tops_py_version_mapping['2'] = tops\n except ValueError:\n pass\n\n tfp = tarfile.open(mintar, 'w:gz', dereference=True)\n try: # cwd may not exist if it was removed but salt was run from it\n start_dir = os.getcwd()\n except OSError:\n start_dir = None\n tempdir = None\n\n # This is the absolute minimum set of files required to run salt-call\n min_files = (\n 'salt/__init__.py',\n 'salt/utils',\n 'salt/utils/__init__.py',\n 'salt/utils/atomicfile.py',\n 'salt/utils/validate',\n 'salt/utils/validate/__init__.py',\n 'salt/utils/validate/path.py',\n 'salt/utils/decorators',\n 'salt/utils/decorators/__init__.py',\n 'salt/utils/cache.py',\n 'salt/utils/xdg.py',\n 'salt/utils/odict.py',\n 'salt/utils/minions.py',\n 'salt/utils/dicttrim.py',\n 'salt/utils/sdb.py',\n 'salt/utils/migrations.py',\n 'salt/utils/files.py',\n 'salt/utils/parsers.py',\n 'salt/utils/locales.py',\n 'salt/utils/lazy.py',\n 'salt/utils/s3.py',\n 'salt/utils/dictupdate.py',\n 'salt/utils/verify.py',\n 'salt/utils/args.py',\n 'salt/utils/kinds.py',\n 'salt/utils/xmlutil.py',\n 'salt/utils/debug.py',\n 'salt/utils/jid.py',\n 'salt/utils/openstack',\n 'salt/utils/openstack/__init__.py',\n 'salt/utils/openstack/swift.py',\n 'salt/utils/asynchronous.py',\n 'salt/utils/process.py',\n 'salt/utils/jinja.py',\n 'salt/utils/rsax931.py',\n 'salt/utils/context.py',\n 'salt/utils/minion.py',\n 'salt/utils/error.py',\n 'salt/utils/aws.py',\n 'salt/utils/timed_subprocess.py',\n 'salt/utils/zeromq.py',\n 'salt/utils/schedule.py',\n 'salt/utils/url.py',\n 'salt/utils/yamlencoding.py',\n 'salt/utils/network.py',\n 'salt/utils/http.py',\n 'salt/utils/gzip_util.py',\n 'salt/utils/vt.py',\n 'salt/utils/templates.py',\n 'salt/utils/aggregation.py',\n 'salt/utils/yaml.py',\n 'salt/utils/yamldumper.py',\n 'salt/utils/yamlloader.py',\n 'salt/utils/event.py',\n 'salt/utils/state.py',\n 'salt/serializers',\n 'salt/serializers/__init__.py',\n 'salt/serializers/yamlex.py',\n 'salt/template.py',\n 'salt/_compat.py',\n 
'salt/loader.py',\n 'salt/client',\n 'salt/client/__init__.py',\n 'salt/ext',\n 'salt/ext/__init__.py',\n 'salt/ext/six.py',\n 'salt/ext/ipaddress.py',\n 'salt/version.py',\n 'salt/syspaths.py',\n 'salt/defaults',\n 'salt/defaults/__init__.py',\n 'salt/defaults/exitcodes.py',\n 'salt/renderers',\n 'salt/renderers/__init__.py',\n 'salt/renderers/jinja.py',\n 'salt/renderers/yaml.py',\n 'salt/modules',\n 'salt/modules/__init__.py',\n 'salt/modules/test.py',\n 'salt/modules/selinux.py',\n 'salt/modules/cmdmod.py',\n 'salt/modules/saltutil.py',\n 'salt/minion.py',\n 'salt/pillar',\n 'salt/pillar/__init__.py',\n 'salt/textformat.py',\n 'salt/log',\n 'salt/log/__init__.py',\n 'salt/log/handlers',\n 'salt/log/handlers/__init__.py',\n 'salt/log/mixins.py',\n 'salt/log/setup.py',\n 'salt/cli',\n 'salt/cli/__init__.py',\n 'salt/cli/caller.py',\n 'salt/cli/daemons.py',\n 'salt/cli/salt.py',\n 'salt/cli/call.py',\n 'salt/fileserver',\n 'salt/fileserver/__init__.py',\n 'salt/transport',\n 'salt/transport/__init__.py',\n 'salt/transport/client.py',\n 'salt/exceptions.py',\n 'salt/grains',\n 'salt/grains/__init__.py',\n 'salt/grains/extra.py',\n 'salt/scripts.py',\n 'salt/state.py',\n 'salt/fileclient.py',\n 'salt/crypt.py',\n 'salt/config.py',\n 'salt/beacons',\n 'salt/beacons/__init__.py',\n 'salt/payload.py',\n 'salt/output',\n 'salt/output/__init__.py',\n 'salt/output/nested.py',\n )\n\n for py_ver, tops in _six.iteritems(tops_py_version_mapping):\n for top in tops:\n base = os.path.basename(top)\n top_dirname = os.path.dirname(top)\n if os.path.isdir(top_dirname):\n os.chdir(top_dirname)\n else:\n # This is likely a compressed python .egg\n tempdir = tempfile.mkdtemp()\n egg = zipfile.ZipFile(top_dirname)\n egg.extractall(tempdir)\n top = os.path.join(tempdir, base)\n os.chdir(tempdir)\n if not os.path.isdir(top):\n # top is a single file module\n tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base))\n continue\n for root, dirs, files in 
salt.utils.path.os_walk(base, followlinks=True):\n for name in files:\n if name.endswith(('.pyc', '.pyo')):\n continue\n if root.startswith('salt') and os.path.join(root, name) not in min_files:\n continue\n tfp.add(os.path.join(root, name),\n arcname=os.path.join('py{0}'.format(py_ver), root, name))\n if tempdir is not None:\n shutil.rmtree(tempdir)\n tempdir = None\n\n os.chdir(mindir)\n tfp.add('salt-call')\n with salt.utils.files.fopen(minver, 'w+') as fp_:\n fp_.write(salt.version.__version__)\n with salt.utils.files.fopen(pyminver, 'w+') as fp_:\n fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function\n os.chdir(os.path.dirname(minver))\n tfp.add('version')\n tfp.add('.min-gen-py-version')\n if start_dir:\n os.chdir(start_dir)\n tfp.close()\n return mintar\n" ]
# -*- coding: utf-8 -*- ''' Generate the salt thin tarball from the installed python files ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import os import shutil import subprocess import sys import tarfile import tempfile import zipfile # Import third party libs import jinja2 import yaml import msgpack import salt.ext.six as _six import tornado try: import zlib except ImportError: zlib = None # pylint: disable=import-error,no-name-in-module try: import certifi except ImportError: certifi = None try: import singledispatch except ImportError: singledispatch = None try: import singledispatch_helpers except ImportError: singledispatch_helpers = None try: import backports_abc except ImportError: import salt.ext.backports_abc as backports_abc try: # New Jinja only import markupsafe except ImportError: markupsafe = None # pylint: enable=import-error,no-name-in-module try: # Older python where the backport from pypi is installed from backports import ssl_match_hostname except ImportError: # Other older python we use our bundled copy try: from salt.ext import ssl_match_hostname except ImportError: ssl_match_hostname = None # Import salt libs import salt import salt.utils.files import salt.utils.hashutils import salt.utils.json import salt.utils.path import salt.utils.stringutils import salt.exceptions import salt.version if _six.PY2: import concurrent else: concurrent = None log = logging.getLogger(__name__) def _get_salt_call(*dirs, **namespaces): ''' Return salt-call source, based on configuration. This will include additional namespaces for another versions of Salt, if needed (e.g. older interpreters etc). 
:dirs: List of directories to include in the system path :namespaces: Dictionary of namespace :return: ''' template = '''# -*- coding: utf-8 -*- import os import sys # Namespaces is a map: {namespace: major/minor version}, like {'2016.11.4': [2, 6]} # Appears only when configured in Master configuration. namespaces = %namespaces% # Default system paths alongside the namespaces syspaths = %dirs% syspaths.append('py{0}'.format(sys.version_info[0])) curr_ver = (sys.version_info[0], sys.version_info[1],) namespace = '' for ns in namespaces: if curr_ver == tuple(namespaces[ns]): namespace = ns break for base in syspaths: sys.path.insert(0, os.path.join(os.path.dirname(__file__), namespace and os.path.join(namespace, base) or base)) if __name__ == '__main__': from salt.scripts import salt_call salt_call() ''' for tgt, cnt in [('%dirs%', dirs), ('%namespaces%', namespaces)]: template = template.replace(tgt, salt.utils.json.dumps(cnt)) return salt.utils.stringutils.to_bytes(template) def thin_path(cachedir): ''' Return the path to the thin tarball ''' return os.path.join(cachedir, 'thin', 'thin.tgz') def _is_shareable(mod): ''' Return True if module is share-able between major Python versions. :param mod: :return: ''' # This list is subject to change shareable = ['salt', 'jinja2', 'msgpack', 'certifi'] return os.path.basename(mod) in shareable def _add_dependency(container, obj): ''' Add a dependency to the top list. :param obj: :param is_file: :return: ''' if os.path.basename(obj.__file__).split('.')[0] == '__init__': container.append(os.path.dirname(obj.__file__)) else: container.append(obj.__file__.replace('.pyc', '.py')) def gte(): ''' This function is called externally from the alternative Python interpreter from within _get_tops function. 
:param extra_mods: :param so_mods: :return: ''' extra = salt.utils.json.loads(sys.argv[1]) tops = get_tops(**extra) return salt.utils.json.dumps(tops, ensure_ascii=False) def get_ext_tops(config): ''' Get top directories for the dependencies, based on external configuration. :return: ''' config = copy.deepcopy(config) alternatives = {} required = ['jinja2', 'yaml', 'tornado', 'msgpack'] tops = [] for ns, cfg in salt.ext.six.iteritems(config or {}): alternatives[ns] = cfg locked_py_version = cfg.get('py-version') err_msg = None if not locked_py_version: err_msg = 'Alternative Salt library: missing specific locked Python version' elif not isinstance(locked_py_version, (tuple, list)): err_msg = ('Alternative Salt library: specific locked Python version ' 'should be a list of major/minor version') if err_msg: raise salt.exceptions.SaltSystemExit(err_msg) if cfg.get('dependencies') == 'inherit': # TODO: implement inheritance of the modules from _here_ raise NotImplementedError('This feature is not yet implemented') else: for dep in cfg.get('dependencies'): mod = cfg['dependencies'][dep] or '' if not mod: log.warning('Module %s has missing configuration', dep) continue elif mod.endswith('.py') and not os.path.isfile(mod): log.warning('Module %s configured with not a file or does not exist: %s', dep, mod) continue elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')): log.warning('Module %s is not a Python importable module with %s', dep, mod) continue tops.append(mod) if dep in required: required.pop(required.index(dep)) required = ', '.join(required) if required: msg = 'Missing dependencies for the alternative version' \ ' in the external configuration: {}'.format(required) log.error(msg) raise salt.exceptions.SaltSystemExit(msg) alternatives[ns]['dependencies'] = tops return alternatives def _get_ext_namespaces(config): ''' Get namespaces from the existing configuration. 
:param config: :return: ''' namespaces = {} if not config: return namespaces for ns in config: constraint_version = tuple(config[ns].get('py-version', [])) if not constraint_version: raise salt.exceptions.SaltSystemExit("An alternative version is configured, but not defined " "to what Python's major/minor version it should be constrained.") else: namespaces[ns] = constraint_version return namespaces def get_tops(extra_mods='', so_mods=''): ''' Get top directories for the dependencies, based on Python interpreter. :param extra_mods: :param so_mods: :return: ''' tops = [] for mod in [salt, jinja2, yaml, tornado, msgpack, certifi, singledispatch, concurrent, singledispatch_helpers, ssl_match_hostname, markupsafe, backports_abc]: if mod: log.debug('Adding module to the tops: "%s"', mod.__name__) _add_dependency(tops, mod) for mod in [m for m in extra_mods.split(',') if m]: if mod not in locals() and mod not in globals(): try: locals()[mod] = __import__(mod) moddir, modname = os.path.split(locals()[mod].__file__) base, _ = os.path.splitext(modname) if base == '__init__': tops.append(moddir) else: tops.append(os.path.join(moddir, base + '.py')) except ImportError as err: log.exception(err) log.error('Unable to import extra-module "%s"', mod) for mod in [m for m in so_mods.split(',') if m]: try: locals()[mod] = __import__(mod) tops.append(locals()[mod].__file__) except ImportError as err: log.exception(err) log.error('Unable to import so-module "%s"', mod) return tops def _get_supported_py_config(tops, extended_cfg): ''' Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. 
:return: ''' pymap = [] for py_ver, tops in _six.iteritems(copy.deepcopy(tops)): py_ver = int(py_ver) if py_ver == 2: pymap.append('py2:2:7') elif py_ver == 3: pymap.append('py3:3:0') for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}): pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version'))) pymap.append('') return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) def _get_thintar_prefix(tarname): ''' Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname ''' tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None): ''' Generate the salt-thin tarball and print the location of the tarball Optional additional mods to include (e.g. mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run thin.generate salt-run thin.generate mako salt-run thin.generate mako,wempy 1 salt-run thin.generate overwrite=1 ''' if sys.version_info < (2, 6): raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".') if compress not in ['gzip', 'zip']: log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress) compress = 'gzip' thindir = os.path.join(cachedir, 'thin') if not os.path.isdir(thindir): os.makedirs(thindir) thintar = os.path.join(thindir, 'thin.' 
+ (compress == 'gzip' and 'tgz' or 'zip')) thinver = os.path.join(thindir, 'version') pythinver = os.path.join(thindir, '.thin-gen-py-version') salt_call = os.path.join(thindir, 'salt-call') pymap_cfg = os.path.join(thindir, 'supported-versions') code_checksum = os.path.join(thindir, 'code-checksum') digest_collector = salt.utils.hashutils.DigestCollector() with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg))) if os.path.isfile(thintar): if not overwrite: if os.path.isfile(thinver): with salt.utils.files.fopen(thinver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pythinver): with salt.utils.files.fopen(pythinver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: log.debug('Removing %s archive file', thintar) os.remove(thintar) except OSError as exc: log.error('Error while removing %s file: %s', thintar, exc) if os.path.exists(thintar): raise salt.exceptions.SaltSystemExit( 'Unable to remove {0}. See logs for details.'.format(thintar) ) else: return thintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip())) else: log.error('Unable to detect Python-2 version') log.debug(stdout) tops_failure_msg = 'Failed %s tops for Python binary %s.' 
tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) tops_py_version_mapping[sys.version_info.major] = tops # Collect tops, alternative to 2.x version if _six.PY2 and sys.version_info.major == 2: # Get python 3 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python3_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python3_bin) log.debug(stderr) # Collect tops, alternative to 3.x version if _six.PY3 and sys.version_info.major == 3: # Get python 2 tops py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format( python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError as err: log.error(tops_failure_msg, 'parsing', python2_bin) log.exception(err) else: log.error(tops_failure_msg, 'collecting', python2_bin) log.debug(stderr) with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_: fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg)) tmp_thintar = _get_thintar_prefix(thintar) if compress == 'gzip': tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True) elif compress == 'zip': tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) tfp.add = tfp.write try: # cwd may not exist if it was removed but salt 
was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # Pack default data log.debug('Packing default libraries based on current Salt version') for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: if absonly and not os.path.isabs(top): continue base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver) log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: # This is a little slow but there's no clear way to detect duplicates tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None # Pack alternative data if extended_cfg: log.debug('Packing libraries based on alternative Salt versions') for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)): tops = [cfg.get('path')] + cfg.get('dependencies') py_ver_major, py_ver_minor = cfg.get('py-version') for top in tops: base, top_dirname = os.path.basename(top), os.path.dirname(top) os.chdir(top_dirname) site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major) log.debug('Packing 
alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir) if not os.path.isdir(top): # top is a single file module if os.path.exists(os.path.join(top_dirname, base)): tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if not name.endswith(('.pyc', '.pyo')): digest_collector.add(os.path.join(root, name)) arcname = os.path.join(ns, site_pkg_dir, root, name) if hasattr(tfp, 'getinfo'): try: tfp.getinfo(os.path.join(site_pkg_dir, root, name)) arcname = None except KeyError: log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname) if arcname: tfp.add(os.path.join(root, name), arcname=arcname) os.chdir(thindir) with salt.utils.files.fopen(thinver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pythinver, 'w+') as fp_: fp_.write(str(sys.version_info.major)) # future lint: disable=blacklisted-function with salt.utils.files.fopen(code_checksum, 'w+') as fp_: fp_.write(digest_collector.digest()) os.chdir(os.path.dirname(thinver)) for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']: tfp.add(fname) if start_dir: os.chdir(start_dir) tfp.close() shutil.move(tmp_thintar, thintar) return thintar def thin_sum(cachedir, form='sha1'): ''' Return the checksum of the current thin tarball ''' thintar = gen_thin(cachedir) code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum') if os.path.isfile(code_checksum_path): with salt.utils.files.fopen(code_checksum_path, 'r') as fh: code_checksum = "'{0}'".format(fh.read().strip()) else: code_checksum = "'0'" return code_checksum, salt.utils.hashutils.get_hash(thintar, form) def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): ''' Generate the salt-min tarball and print the location of the tarball Optional additional mods to include (e.g. 
mako) can be supplied as a comma delimited string. Permits forcing an overwrite of the output file as well. CLI Example: .. code-block:: bash salt-run min.generate salt-run min.generate mako salt-run min.generate mako,wempy 1 salt-run min.generate overwrite=1 ''' mindir = os.path.join(cachedir, 'min') if not os.path.isdir(mindir): os.makedirs(mindir) mintar = os.path.join(mindir, 'min.tgz') minver = os.path.join(mindir, 'version') pyminver = os.path.join(mindir, '.min-gen-py-version') salt_call = os.path.join(mindir, 'salt-call') with salt.utils.files.fopen(salt_call, 'wb') as fp_: fp_.write(_get_salt_call()) if os.path.isfile(mintar): if not overwrite: if os.path.isfile(minver): with salt.utils.files.fopen(minver) as fh_: overwrite = fh_.read() != salt.version.__version__ if overwrite is False and os.path.isfile(pyminver): with salt.utils.files.fopen(pyminver) as fh_: overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function else: overwrite = True if overwrite: try: os.remove(mintar) except OSError: pass else: return mintar if _six.PY3: # Let's check for the minimum python 2 version requirement, 2.6 py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; import sys; ' 'print("{0}.{1}".format(*(sys.version_info[:2])));\'' ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd.communicate() if cmd.returncode == 0: py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.')) if py2_version < (2, 6): # Bail! raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()) ) elif sys.version_info < (2, 6): # Bail! Though, how did we reached this far in the first place. raise salt.exceptions.SaltSystemExit( 'The minimum required python version to run salt-ssh is "2.6".' 
) tops_py_version_mapping = {} tops = get_tops(extra_mods=extra_mods, so_mods=so_mods) if _six.PY2: tops_py_version_mapping['2'] = tops else: tops_py_version_mapping['3'] = tops # TODO: Consider putting known py2 and py3 compatible libs in it's own sharable directory. # This would reduce the min size. if _six.PY2 and sys.version_info[0] == 2: # Get python 3 tops py_shell_cmd = ( python3_bin + ' -c \'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout) tops_py_version_mapping['3'] = tops except ValueError: pass if _six.PY3 and sys.version_info[0] == 3: # Get python 2 tops py_shell_cmd = ( python2_bin + ' -c \'from __future__ import print_function; ' 'import sys; import json; import salt.utils.thin; ' 'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' ' '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods})) ) cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = cmd.communicate() if cmd.returncode == 0: try: tops = salt.utils.json.loads(stdout.decode('utf-8')) tops_py_version_mapping['2'] = tops except ValueError: pass tfp = tarfile.open(mintar, 'w:gz', dereference=True) try: # cwd may not exist if it was removed but salt was run from it start_dir = os.getcwd() except OSError: start_dir = None tempdir = None # This is the absolute minimum set of files required to run salt-call min_files = ( 'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py', 'salt/utils/atomicfile.py', 'salt/utils/validate', 'salt/utils/validate/__init__.py', 
'salt/utils/validate/path.py', 'salt/utils/decorators', 'salt/utils/decorators/__init__.py', 'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py', 'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py', 'salt/utils/migrations.py', 'salt/utils/files.py', 'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py', 'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py', 'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py', 'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack', 'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py', 'salt/utils/asynchronous.py', 'salt/utils/process.py', 'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py', 'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py', 'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py', 'salt/utils/schedule.py', 'salt/utils/url.py', 'salt/utils/yamlencoding.py', 'salt/utils/network.py', 'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py', 'salt/utils/templates.py', 'salt/utils/aggregation.py', 'salt/utils/yaml.py', 'salt/utils/yamldumper.py', 'salt/utils/yamlloader.py', 'salt/utils/event.py', 'salt/utils/state.py', 'salt/serializers', 'salt/serializers/__init__.py', 'salt/serializers/yamlex.py', 'salt/template.py', 'salt/_compat.py', 'salt/loader.py', 'salt/client', 'salt/client/__init__.py', 'salt/ext', 'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py', 'salt/version.py', 'salt/syspaths.py', 'salt/defaults', 'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py', 'salt/renderers', 'salt/renderers/__init__.py', 'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules', 'salt/modules/__init__.py', 'salt/modules/test.py', 'salt/modules/selinux.py', 'salt/modules/cmdmod.py', 'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar', 'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log', 'salt/log/__init__.py', 
'salt/log/handlers', 'salt/log/handlers/__init__.py', 'salt/log/mixins.py', 'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py', 'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py', 'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py', 'salt/transport', 'salt/transport/__init__.py', 'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains', 'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py', 'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py', 'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py', 'salt/payload.py', 'salt/output', 'salt/output/__init__.py', 'salt/output/nested.py', ) for py_ver, tops in _six.iteritems(tops_py_version_mapping): for top in tops: base = os.path.basename(top) top_dirname = os.path.dirname(top) if os.path.isdir(top_dirname): os.chdir(top_dirname) else: # This is likely a compressed python .egg tempdir = tempfile.mkdtemp() egg = zipfile.ZipFile(top_dirname) egg.extractall(tempdir) top = os.path.join(tempdir, base) os.chdir(tempdir) if not os.path.isdir(top): # top is a single file module tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base)) continue for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True): for name in files: if name.endswith(('.pyc', '.pyo')): continue if root.startswith('salt') and os.path.join(root, name) not in min_files: continue tfp.add(os.path.join(root, name), arcname=os.path.join('py{0}'.format(py_ver), root, name)) if tempdir is not None: shutil.rmtree(tempdir) tempdir = None os.chdir(mindir) tfp.add('salt-call') with salt.utils.files.fopen(minver, 'w+') as fp_: fp_.write(salt.version.__version__) with salt.utils.files.fopen(pyminver, 'w+') as fp_: fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function os.chdir(os.path.dirname(minver)) tfp.add('version') tfp.add('.min-gen-py-version') if start_dir: os.chdir(start_dir) tfp.close() return mintar
saltstack/salt
salt/modules/solarisipspkg.py
refresh_db
python
def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0
Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L112-L133
[ "def clear_rtag(opts):\n '''\n Remove the rtag file\n '''\n try:\n os.remove(rtag(opts))\n except OSError as exc:\n if exc.errno != errno.ENOENT:\n # Using __str__() here to get the fully-formatted error message\n # (error number, error message, path)\n log.warning('Encountered error removing rtag: %s', exc.__str__())\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. 
When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False ''' if salt.utils.data.is_true(refresh): refresh_db(full=True) upgrades = {} # awk is in core-os package so we can use it without checking lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines() for line in lines: upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return upgrades def upgrade(refresh=False, **kwargs): ''' Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constrains linked to the version of pkg://solaris/entire. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' if salt.utils.data.is_true(refresh): refresh_db() # Get a list of the packages before install so we can diff after to see # what got installed. 
old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = ['pkg', 'update', '-v', '--accept'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'retcode': ips_pkg_return_values[result['retcode']], 'result': result} ) return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = '/bin/pkg list -Hv' lines = __salt__['cmd.run_stdout'](cmd).splitlines() # column 1 is full FMRI name in form pkg://publisher/class/name@version for line in lines: name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. 
code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. 
code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0]) def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. 
Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret def remove(name=None, pkgs=None, **kwargs): ''' Remove specified package. 
Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the package(s) cmd = ['/bin/pkg', 'uninstall', '-v'] + targets out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred removing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/modules/solarisipspkg.py
upgrade_available
python
def upgrade_available(name, **kwargs):
    '''
    Check if there is an upgrade available for a certain package
    Accepts full or partial FMRI. Returns all matches found.

    Returns a dict of ``{pkg_name: available_version}`` for every match,
    or an empty dict when no upgrade is pending (or the name is unknown).

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.upgrade_available apache-22
    '''
    # -H: omit the header row, -u: list only packages with a newer version
    # available, -v: print full FMRIs so name and version can be parsed out
    cmd = ['pkg', 'list', '-Huv', name]
    lines = __salt__['cmd.run_stdout'](cmd).splitlines()
    # No output means nothing upgradable matched the given (partial) FMRI
    if not lines:
        return {}
    ret = {}
    for line in lines:
        ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line)
    return ret
Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available apache-22
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L136-L155
[ "def _ips_get_pkgname(line):\n '''\n Extracts package name from \"pkg list -v\" output.\n Input: one line of the command output\n Output: pkg name (e.g.: \"pkg://solaris/x11/library/toolkit/libxt\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgname(line)\n '''\n return line.split()[0].split('@')[0].strip()\n", "def _ips_get_pkgversion(line):\n '''\n Extracts package version from \"pkg list -v\" output.\n Input: one line of the command output\n Output: package version (e.g.: \"1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgversion(line)\n '''\n return line.split()[0].split('@')[1].strip()\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0 def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. 
When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False ''' if salt.utils.data.is_true(refresh): refresh_db(full=True) upgrades = {} # awk is in core-os package so we can use it without checking lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines() for line in lines: upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return upgrades def upgrade(refresh=False, **kwargs): ''' Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constrains linked to the version of pkg://solaris/entire. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' if salt.utils.data.is_true(refresh): refresh_db() # Get a list of the packages before install so we can diff after to see # what got installed. 
old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = ['pkg', 'update', '-v', '--accept'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'retcode': ips_pkg_return_values[result['retcode']], 'result': result} ) return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = '/bin/pkg list -Hv' lines = __salt__['cmd.run_stdout'](cmd).splitlines() # column 1 is full FMRI name in form pkg://publisher/class/name@version for line in lines: name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. 
code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. 
code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0]) def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. 
Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret def remove(name=None, pkgs=None, **kwargs): ''' Remove specified package. 
Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the package(s) cmd = ['/bin/pkg', 'uninstall', '-v'] + targets out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred removing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/modules/solarisipspkg.py
list_upgrades
python
def list_upgrades(refresh=True, **kwargs):  # pylint: disable=W0613
    '''
    Lists all packages available for update.

    When run in global zone, it reports only upgradable packages for the
    global zone.

    When run in non-global zone, it can report more upgradable packages than
    ``pkg update -vn``, because ``pkg update`` hides packages that require
    newer version of ``pkg://solaris/entire`` (which means that they can be
    upgraded only from the global zone). If ``pkg://solaris/entire`` is found
    in the list of upgrades, then the global zone should be updated to get all
    possible updates. Use ``refresh=True`` to refresh the package database.

    refresh : True
        Runs a full package database refresh before listing. Set to ``False``
        to disable running the refresh.

        .. versionchanged:: 2017.7.0

        In previous versions of Salt, ``refresh`` defaulted to ``False``. This
        was changed to default to ``True`` in the 2017.7.0 release to make the
        behavior more consistent with the other package modules, which all
        default to ``True``.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_upgrades
        salt '*' pkg.list_upgrades refresh=False
    '''
    if salt.utils.data.is_true(refresh):
        refresh_db(full=True)
    # awk is in core-os package so we can use it without checking
    entries = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines()
    # Each entry is one full FMRI line; map package name -> available version
    return {
        _ips_get_pkgname(entry): _ips_get_pkgversion(entry)
        for entry in entries
    }
Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L158-L196
[ "def is_true(value=None):\n '''\n Returns a boolean value representing the \"truth\" of the value passed. The\n rules for what is a \"True\" value are:\n\n 1. Integer/float values greater than 0\n 2. The string values \"True\" and \"true\"\n 3. Any object for which bool(obj) returns True\n '''\n # First, try int/float conversion\n try:\n value = int(value)\n except (ValueError, TypeError):\n pass\n try:\n value = float(value)\n except (ValueError, TypeError):\n pass\n\n # Now check for truthiness\n if isinstance(value, (six.integer_types, float)):\n return value > 0\n elif isinstance(value, six.string_types):\n return six.text_type(value).lower() == 'true'\n else:\n return bool(value)\n", "def refresh_db(full=False, **kwargs):\n '''\n Updates the remote repos database.\n\n full : False\n\n Set to ``True`` to force a refresh of the pkg DB from all publishers,\n regardless of the last refresh time.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.refresh_db\n salt '*' pkg.refresh_db full=True\n '''\n # Remove rtag file to keep multiple refreshes from happening in pkg states\n salt.utils.pkg.clear_rtag(__opts__)\n if full:\n return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0\n else:\n return __salt__['cmd.retcode']('/bin/pkg refresh') == 0\n", "def _ips_get_pkgname(line):\n '''\n Extracts package name from \"pkg list -v\" output.\n Input: one line of the command output\n Output: pkg name (e.g.: \"pkg://solaris/x11/library/toolkit/libxt\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgname(line)\n '''\n return line.split()[0].split('@')[0].strip()\n", "def _ips_get_pkgversion(line):\n '''\n Extracts package version from \"pkg list -v\" output.\n Input: one line of the command output\n Output: package version (e.g.: \"1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z\")\n Example use:\n line = 
\"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgversion(line)\n '''\n return line.split()[0].split('@')[1].strip()\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0 def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret def upgrade(refresh=False, **kwargs): ''' Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constrains linked to the version of pkg://solaris/entire. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' if salt.utils.data.is_true(refresh): refresh_db() # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = ['pkg', 'update', '-v', '--accept'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'retcode': ips_pkg_return_values[result['retcode']], 'result': result} ) return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. 
code-block:: bash salt '*' pkg.list_pkgs ''' # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = '/bin/pkg list -Hv' lines = __salt__['cmd.run_stdout'](cmd).splitlines() # column 1 is full FMRI name in form pkg://publisher/class/name@version for line in lines: name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. 
Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. 
code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0]) def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. 
code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. 
old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret def remove(name=None, pkgs=None, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) # Get a list of the currently installed pkgs. 
old = list_pkgs() # Remove the package(s) cmd = ['/bin/pkg', 'uninstall', '-v'] + targets out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred removing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/modules/solarisipspkg.py
upgrade
python
def upgrade(refresh=False, **kwargs):
    '''
    Upgrade all packages to the latest possible version.

    When run in the global zone, this also updates all non-global zones.
    In a non-global zone the upgrade is limited by dependency constraints
    linked to the version of pkg://solaris/entire.

    refresh : False
        Refresh the package database before upgrading.

    Returns a dictionary containing the changes:

    .. code-block:: python

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}

    When there is a failure, an explanation is also included in the error
    message, based on the return code of the ``pkg update`` command.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.upgrade
    '''
    if salt.utils.data.is_true(refresh):
        refresh_db()

    # Snapshot the installed package set so we can diff it afterwards.
    before = list_pkgs()

    update_cmd = ['pkg', 'update', '-v', '--accept']
    outcome = __salt__['cmd.run_all'](update_cmd,
                                      output_loglevel='trace',
                                      python_shell=False)

    # Invalidate the cached package listing; the set may have changed.
    __context__.pop('pkg.list_pkgs', None)
    after = list_pkgs()
    changes = salt.utils.data.compare_dicts(before, after)

    if outcome['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered upgrading packages',
            info={'changes': changes,
                  'retcode': ips_pkg_return_values[outcome['retcode']],
                  'result': outcome}
        )

    return changes
Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constraints linked to the version of pkg://solaris/entire. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L199-L248
[ "def is_true(value=None):\n '''\n Returns a boolean value representing the \"truth\" of the value passed. The\n rules for what is a \"True\" value are:\n\n 1. Integer/float values greater than 0\n 2. The string values \"True\" and \"true\"\n 3. Any object for which bool(obj) returns True\n '''\n # First, try int/float conversion\n try:\n value = int(value)\n except (ValueError, TypeError):\n pass\n try:\n value = float(value)\n except (ValueError, TypeError):\n pass\n\n # Now check for truthiness\n if isinstance(value, (six.integer_types, float)):\n return value > 0\n elif isinstance(value, six.string_types):\n return six.text_type(value).lower() == 'true'\n else:\n return bool(value)\n", "def list_pkgs(versions_as_list=False, **kwargs):\n '''\n List the currently installed packages as a dict::\n\n {'<package_name>': '<version>'}\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.list_pkgs\n '''\n # not yet implemented or not applicable\n if any([salt.utils.data.is_true(kwargs.get(x))\n for x in ('removed', 'purge_desired')]):\n return {}\n\n if 'pkg.list_pkgs' in __context__:\n if versions_as_list:\n return __context__['pkg.list_pkgs']\n else:\n ret = copy.deepcopy(__context__['pkg.list_pkgs'])\n __salt__['pkg_resource.stringify'](ret)\n return ret\n\n ret = {}\n cmd = '/bin/pkg list -Hv'\n lines = __salt__['cmd.run_stdout'](cmd).splitlines()\n # column 1 is full FMRI name in form pkg://publisher/class/name@version\n for line in lines:\n name = _ips_get_pkgname(line)\n version = _ips_get_pkgversion(line)\n __salt__['pkg_resource.add_pkg'](ret, name, version)\n\n __salt__['pkg_resource.sort_pkglist'](ret)\n __context__['pkg.list_pkgs'] = copy.deepcopy(ret)\n if not versions_as_list:\n __salt__['pkg_resource.stringify'](ret)\n return ret\n", "def refresh_db(full=False, **kwargs):\n '''\n Updates the remote repos database.\n\n full : False\n\n Set to ``True`` to force a refresh of the pkg DB from all publishers,\n regardless of the last refresh time.\n\n 
CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.refresh_db\n salt '*' pkg.refresh_db full=True\n '''\n # Remove rtag file to keep multiple refreshes from happening in pkg states\n salt.utils.pkg.clear_rtag(__opts__)\n if full:\n return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0\n else:\n return __salt__['cmd.retcode']('/bin/pkg refresh') == 0\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0 def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False ''' if salt.utils.data.is_true(refresh): refresh_db(full=True) upgrades = {} # awk is in core-os package so we can use it without checking lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines() for line in lines: upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return upgrades def list_pkgs(versions_as_list=False, **kwargs): ''' List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. 
code-block:: bash salt '*' pkg.list_pkgs ''' # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = '/bin/pkg list -Hv' lines = __salt__['cmd.run_stdout'](cmd).splitlines() # column 1 is full FMRI name in form pkg://publisher/class/name@version for line in lines: name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. 
Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. 
code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0]) def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. 
code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. 
old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret def remove(name=None, pkgs=None, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) # Get a list of the currently installed pkgs. 
old = list_pkgs() # Remove the package(s) cmd = ['/bin/pkg', 'uninstall', '-v'] + targets out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred removing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/modules/solarisipspkg.py
list_pkgs
python
def list_pkgs(versions_as_list=False, **kwargs):
    '''
    List the currently installed packages as a dict::

        {'<package_name>': '<version>'}

    versions_as_list : False
        If ``True``, keep versions as lists instead of collapsing them
        into strings via ``pkg_resource.stringify``.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_pkgs
    '''
    # 'removed' / 'purge_desired' are not applicable to IPS: report nothing.
    # Idiom fix: use a generator with any() instead of building a list.
    if any(salt.utils.data.is_true(kwargs.get(x))
           for x in ('removed', 'purge_desired')):
        return {}

    # Serve from the per-run cache when it has already been populated.
    if 'pkg.list_pkgs' in __context__:
        if versions_as_list:
            return __context__['pkg.list_pkgs']
        # Deep-copy so stringify does not mutate the cached structure.
        ret = copy.deepcopy(__context__['pkg.list_pkgs'])
        __salt__['pkg_resource.stringify'](ret)
        return ret

    ret = {}
    cmd = '/bin/pkg list -Hv'
    # Column 1 is the full FMRI in the form pkg://publisher/class/name@version
    for line in __salt__['cmd.run_stdout'](cmd).splitlines():
        name = _ips_get_pkgname(line)
        version = _ips_get_pkgversion(line)
        __salt__['pkg_resource.add_pkg'](ret, name, version)

    __salt__['pkg_resource.sort_pkglist'](ret)
    # Cache a deep copy so a later stringify call cannot corrupt the cache.
    __context__['pkg.list_pkgs'] = copy.deepcopy(ret)
    if not versions_as_list:
        __salt__['pkg_resource.stringify'](ret)
    return ret
List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L251-L289
[ "def _ips_get_pkgname(line):\n '''\n Extracts package name from \"pkg list -v\" output.\n Input: one line of the command output\n Output: pkg name (e.g.: \"pkg://solaris/x11/library/toolkit/libxt\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgname(line)\n '''\n return line.split()[0].split('@')[0].strip()\n", "def _ips_get_pkgversion(line):\n '''\n Extracts package version from \"pkg list -v\" output.\n Input: one line of the command output\n Output: package version (e.g.: \"1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgversion(line)\n '''\n return line.split()[0].split('@')[1].strip()\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0 def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False ''' if salt.utils.data.is_true(refresh): refresh_db(full=True) upgrades = {} # awk is in core-os package so we can use it without checking lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines() for line in lines: upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return upgrades def upgrade(refresh=False, **kwargs): ''' Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constrains linked to the version of pkg://solaris/entire. 
Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' if salt.utils.data.is_true(refresh): refresh_db() # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = ['pkg', 'update', '-v', '--accept'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'retcode': ips_pkg_return_values[result['retcode']], 'result': result} ) return ret def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. 
code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. 
code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0]) def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. 
Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret def remove(name=None, pkgs=None, **kwargs): ''' Remove specified package. 
Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the package(s) cmd = ['/bin/pkg', 'uninstall', '-v'] + targets out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred removing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/modules/solarisipspkg.py
version
python
def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret
Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L292-L328
[ "def itervalues(d, **kw):\n return d.itervalues(**kw)\n", "def _ips_get_pkgname(line):\n '''\n Extracts package name from \"pkg list -v\" output.\n Input: one line of the command output\n Output: pkg name (e.g.: \"pkg://solaris/x11/library/toolkit/libxt\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgname(line)\n '''\n return line.split()[0].split('@')[0].strip()\n", "def _ips_get_pkgversion(line):\n '''\n Extracts package version from \"pkg list -v\" output.\n Input: one line of the command output\n Output: package version (e.g.: \"1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgversion(line)\n '''\n return line.split()[0].split('@')[1].strip()\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0 def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False ''' if salt.utils.data.is_true(refresh): refresh_db(full=True) upgrades = {} # awk is in core-os package so we can use it without checking lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines() for line in lines: upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return upgrades def upgrade(refresh=False, **kwargs): ''' Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constrains linked to the version of pkg://solaris/entire. 
Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' if salt.utils.data.is_true(refresh): refresh_db() # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = ['pkg', 'update', '-v', '--accept'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'retcode': ips_pkg_return_values[result['retcode']], 'result': result} ) return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. 
code-block:: bash salt '*' pkg.list_pkgs ''' # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = '/bin/pkg list -Hv' lines = __salt__['cmd.run_stdout'](cmd).splitlines() # column 1 is full FMRI name in form pkg://publisher/class/name@version for line in lines: name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. 
code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0]) def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. 
Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret def remove(name=None, pkgs=None, **kwargs): ''' Remove specified package. 
Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the package(s) cmd = ['/bin/pkg', 'uninstall', '-v'] + targets out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred removing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/modules/solarisipspkg.py
get_fmri
python
def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret
Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. code-block:: bash salt '*' pkg.get_fmri bash
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L394-L418
[ "def _ips_get_pkgname(line):\n '''\n Extracts package name from \"pkg list -v\" output.\n Input: one line of the command output\n Output: pkg name (e.g.: \"pkg://solaris/x11/library/toolkit/libxt\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgname(line)\n '''\n return line.split()[0].split('@')[0].strip()\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0 def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False ''' if salt.utils.data.is_true(refresh): refresh_db(full=True) upgrades = {} # awk is in core-os package so we can use it without checking lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines() for line in lines: upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return upgrades def upgrade(refresh=False, **kwargs): ''' Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constrains linked to the version of pkg://solaris/entire. 
Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' if salt.utils.data.is_true(refresh): refresh_db() # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = ['pkg', 'update', '-v', '--accept'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'retcode': ips_pkg_return_values[result['retcode']], 'result': result} ) return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. 
code-block:: bash salt '*' pkg.list_pkgs ''' # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = '/bin/pkg list -Hv' lines = __salt__['cmd.run_stdout'](cmd).splitlines() # column 1 is full FMRI name in form pkg://publisher/class/name@version for line in lines: name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. 
Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. 
code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0]) def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. 
Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret def remove(name=None, pkgs=None, **kwargs): ''' Remove specified package. 
Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the package(s) cmd = ['/bin/pkg', 'uninstall', '-v'] + targets out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred removing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/modules/solarisipspkg.py
normalize_name
python
def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0])
Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. code-block:: bash salt '*' pkg.normalize_name vim
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L421-L445
[ "def _ips_get_pkgname(line):\n '''\n Extracts package name from \"pkg list -v\" output.\n Input: one line of the command output\n Output: pkg name (e.g.: \"pkg://solaris/x11/library/toolkit/libxt\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgname(line)\n '''\n return line.split()[0].split('@')[0].strip()\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0 def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False ''' if salt.utils.data.is_true(refresh): refresh_db(full=True) upgrades = {} # awk is in core-os package so we can use it without checking lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines() for line in lines: upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return upgrades def upgrade(refresh=False, **kwargs): ''' Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constrains linked to the version of pkg://solaris/entire. 
Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' if salt.utils.data.is_true(refresh): refresh_db() # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = ['pkg', 'update', '-v', '--accept'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'retcode': ips_pkg_return_values[result['retcode']], 'result': result} ) return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. 
code-block:: bash salt '*' pkg.list_pkgs ''' # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = '/bin/pkg list -Hv' lines = __salt__['cmd.run_stdout'](cmd).splitlines() # column 1 is full FMRI name in form pkg://publisher/class/name@version for line in lines: name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. 
Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. 
code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. 
code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret def remove(name=None, pkgs=None, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. 
The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the package(s) cmd = ['/bin/pkg', 'uninstall', '-v'] + targets out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred removing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/modules/solarisipspkg.py
search
python
def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret
Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. code-block:: bash salt '*' pkg.search bash
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L465-L493
[ "def _ips_get_pkgname(line):\n '''\n Extracts package name from \"pkg list -v\" output.\n Input: one line of the command output\n Output: pkg name (e.g.: \"pkg://solaris/x11/library/toolkit/libxt\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgname(line)\n '''\n return line.split()[0].split('@')[0].strip()\n", "def _ips_get_pkgversion(line):\n '''\n Extracts package version from \"pkg list -v\" output.\n Input: one line of the command output\n Output: package version (e.g.: \"1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z\")\n Example use:\n line = \"pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--\"\n name = _ips_get_pkgversion(line)\n '''\n return line.split()[0].split('@')[1].strip()\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0 def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False ''' if salt.utils.data.is_true(refresh): refresh_db(full=True) upgrades = {} # awk is in core-os package so we can use it without checking lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines() for line in lines: upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return upgrades def upgrade(refresh=False, **kwargs): ''' Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constrains linked to the version of pkg://solaris/entire. 
Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' if salt.utils.data.is_true(refresh): refresh_db() # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = ['pkg', 'update', '-v', '--accept'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'retcode': ips_pkg_return_values[result['retcode']], 'result': result} ) return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. 
code-block:: bash salt '*' pkg.list_pkgs ''' # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = '/bin/pkg list -Hv' lines = __salt__['cmd.run_stdout'](cmd).splitlines() # column 1 is full FMRI name in form pkg://publisher/class/name@version for line in lines: name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. 
Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. 
code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0]) def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. 
code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret def remove(name=None, pkgs=None, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. 
The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the package(s) cmd = ['/bin/pkg', 'uninstall', '-v'] + targets out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred removing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/modules/solarisipspkg.py
install
python
def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. 
__context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret
Install the named package using the IPS pkg command. Accepts full or partial FMRI. Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L496-L586
[ "def list_pkgs(versions_as_list=False, **kwargs):\n '''\n List the currently installed packages as a dict::\n\n {'<package_name>': '<version>'}\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.list_pkgs\n '''\n # not yet implemented or not applicable\n if any([salt.utils.data.is_true(kwargs.get(x))\n for x in ('removed', 'purge_desired')]):\n return {}\n\n if 'pkg.list_pkgs' in __context__:\n if versions_as_list:\n return __context__['pkg.list_pkgs']\n else:\n ret = copy.deepcopy(__context__['pkg.list_pkgs'])\n __salt__['pkg_resource.stringify'](ret)\n return ret\n\n ret = {}\n cmd = '/bin/pkg list -Hv'\n lines = __salt__['cmd.run_stdout'](cmd).splitlines()\n # column 1 is full FMRI name in form pkg://publisher/class/name@version\n for line in lines:\n name = _ips_get_pkgname(line)\n version = _ips_get_pkgversion(line)\n __salt__['pkg_resource.add_pkg'](ret, name, version)\n\n __salt__['pkg_resource.sort_pkglist'](ret)\n __context__['pkg.list_pkgs'] = copy.deepcopy(ret)\n if not versions_as_list:\n __salt__['pkg_resource.stringify'](ret)\n return ret\n", "def refresh_db(full=False, **kwargs):\n '''\n Updates the remote repos database.\n\n full : False\n\n Set to ``True`` to force a refresh of the pkg DB from all publishers,\n regardless of the last refresh time.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.refresh_db\n salt '*' pkg.refresh_db full=True\n '''\n # Remove rtag file to keep multiple refreshes from happening in pkg states\n salt.utils.pkg.clear_rtag(__opts__)\n if full:\n return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0\n else:\n return __salt__['cmd.retcode']('/bin/pkg refresh') == 0\n", "def is_installed(name, **kwargs):\n '''\n Returns True if the package is installed. Otherwise returns False.\n Name can be full or partial FMRI.\n In case of multiple match from partial FMRI name, it returns True.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' pkg.is_installed bash\n '''\n\n cmd = ['/bin/pkg', 'list', '-Hv', name]\n return __salt__['cmd.retcode'](cmd) == 0\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0 def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False ''' if salt.utils.data.is_true(refresh): refresh_db(full=True) upgrades = {} # awk is in core-os package so we can use it without checking lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines() for line in lines: upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return upgrades def upgrade(refresh=False, **kwargs): ''' Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constrains linked to the version of pkg://solaris/entire. 
Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' if salt.utils.data.is_true(refresh): refresh_db() # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = ['pkg', 'update', '-v', '--accept'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'retcode': ips_pkg_return_values[result['retcode']], 'result': result} ) return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. 
code-block:: bash salt '*' pkg.list_pkgs ''' # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = '/bin/pkg list -Hv' lines = __salt__['cmd.run_stdout'](cmd).splitlines() # column 1 is full FMRI name in form pkg://publisher/class/name@version for line in lines: name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. 
Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. 
code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0]) def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. 
code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def remove(name=None, pkgs=None, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the package(s) cmd = ['/bin/pkg', 'uninstall', '-v'] + targets out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred removing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. 
Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/modules/solarisipspkg.py
remove
python
def remove(name=None, pkgs=None, **kwargs):
    '''
    Remove specified package. Accepts full or partial FMRI. In case of
    multiple match, the command fails and won't modify the OS.

    name
        The name of the package to be deleted.

    Multiple Package Options:

    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.

    Returns a list containing the removed packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove tcsh
        salt '*' pkg.remove pkg://solaris/shell/tcsh
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    # ``pkgs`` takes precedence over ``name``; normalize either into a list.
    if pkgs:
        targets = salt.utils.args.split_input(pkgs)
    else:
        targets = [name]
    if not targets:
        return {}

    if pkgs:
        log.debug('Removing these packages instead of %s: %s', name, targets)

    # Snapshot the installed packages so the diff below reflects only this call.
    before = list_pkgs()

    # Run the uninstall; targets may be full or partial FMRIs.
    result = __salt__['cmd.run_all'](
        ['/bin/pkg', 'uninstall', '-v'] + targets,
        output_loglevel='trace')

    # Invalidate the cached listing and re-query to see what actually changed.
    __context__.pop('pkg.list_pkgs', None)
    after = list_pkgs()
    changes = salt.utils.data.compare_dicts(before, after)

    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Error occurred removing package(s)',
            info={
                'changes': changes,
                'retcode': ips_pkg_return_values[result['retcode']],
                'errors': [result['stderr']]
            }
        )

    return changes
Remove specified package. Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L589-L645
[ "def split_input(val, mapper=None):\n '''\n Take an input value and split it into a list, returning the resulting list\n '''\n if mapper is None:\n mapper = lambda x: x\n if isinstance(val, list):\n return list(map(mapper, val))\n try:\n return list(map(mapper, [x.strip() for x in val.split(',')]))\n except AttributeError:\n return list(map(mapper, [x.strip() for x in six.text_type(val).split(',')]))\n", "def list_pkgs(versions_as_list=False, **kwargs):\n '''\n List the currently installed packages as a dict::\n\n {'<package_name>': '<version>'}\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.list_pkgs\n '''\n # not yet implemented or not applicable\n if any([salt.utils.data.is_true(kwargs.get(x))\n for x in ('removed', 'purge_desired')]):\n return {}\n\n if 'pkg.list_pkgs' in __context__:\n if versions_as_list:\n return __context__['pkg.list_pkgs']\n else:\n ret = copy.deepcopy(__context__['pkg.list_pkgs'])\n __salt__['pkg_resource.stringify'](ret)\n return ret\n\n ret = {}\n cmd = '/bin/pkg list -Hv'\n lines = __salt__['cmd.run_stdout'](cmd).splitlines()\n # column 1 is full FMRI name in form pkg://publisher/class/name@version\n for line in lines:\n name = _ips_get_pkgname(line)\n version = _ips_get_pkgversion(line)\n __salt__['pkg_resource.add_pkg'](ret, name, version)\n\n __salt__['pkg_resource.sort_pkglist'](ret)\n __context__['pkg.list_pkgs'] = copy.deepcopy(ret)\n if not versions_as_list:\n __salt__['pkg_resource.stringify'](ret)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' IPS pkg support for Solaris .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. This module provides support for Solaris 11 new package management - IPS (Image Packaging System). This is the default pkg module for Solaris 11 (and later). If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider in sls for each package: .. code-block:: yaml mypackage: pkg.installed: - provider: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this: .. code-block:: yaml providers: pkg: pkgutil ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import logging import itertools # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.path import salt.utils.pkg from salt.ext.six import string_types from salt.exceptions import CommandExecutionError from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin from functools import reduce # Define the module's virtual name __virtualname__ = 'pkg' log = logging.getLogger(__name__) def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' 'on Solaris >= 11.') ips_pkg_return_values = { 0: 'Command succeeded.', 1: 'An error occurred.', 2: 'Invalid command line options 
were specified.', 3: 'Multiple operations were requested, but only some of them succeeded.', 4: 'No changes were made - nothing to do.', 5: 'The requested operation cannot be performed on a live image.', 6: 'The requested operation cannot be completed because the licenses for ' 'the packages being installed or updated have not been accepted.', 7: 'The image is currently in use by another process and cannot be ' 'modified.' } def _ips_get_pkgname(line): ''' Extracts package name from "pkg list -v" output. Input: one line of the command output Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgname(line) ''' return line.split()[0].split('@')[0].strip() def _ips_get_pkgversion(line): ''' Extracts package version from "pkg list -v" output. Input: one line of the command output Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z") Example use: line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--" name = _ips_get_pkgversion(line) ''' return line.split()[0].split('@')[1].strip() def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0 def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Lists all packages available for update. When run in global zone, it reports only upgradable packages for the global zone. When run in non-global zone, it can report more upgradable packages than ``pkg update -vn``, because ``pkg update`` hides packages that require newer version of ``pkg://solaris/entire`` (which means that they can be upgraded only from the global zone). If ``pkg://solaris/entire`` is found in the list of upgrades, then the global zone should be updated to get all possible updates. Use ``refresh=True`` to refresh the package database. refresh : True Runs a full package database refresh before listing. Set to ``False`` to disable running the refresh. .. versionchanged:: 2017.7.0 In previous versions of Salt, ``refresh`` defaulted to ``False``. This was changed to default to ``True`` in the 2017.7.0 release to make the behavior more consistent with the other package modules, which all default to ``True``. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades salt '*' pkg.list_upgrades refresh=False ''' if salt.utils.data.is_true(refresh): refresh_db(full=True) upgrades = {} # awk is in core-os package so we can use it without checking lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines() for line in lines: upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return upgrades def upgrade(refresh=False, **kwargs): ''' Upgrade all packages to the latest possible version. When run in global zone, it updates also all non-global zones. In non-global zones upgrade is limited by dependency constrains linked to the version of pkg://solaris/entire. 
Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} When there is a failure, an explanation is also included in the error message, based on the return code of the ``pkg update`` command. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' if salt.utils.data.is_true(refresh): refresh_db() # Get a list of the packages before install so we can diff after to see # what got installed. old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = ['pkg', 'update', '-v', '--accept'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'retcode': ips_pkg_return_values[result['retcode']], 'result': result} ) return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the currently installed packages as a dict:: {'<package_name>': '<version>'} CLI Example: .. 
code-block:: bash salt '*' pkg.list_pkgs ''' # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = '/bin/pkg list -Hv' lines = __salt__['cmd.run_stdout'](cmd).splitlines() # column 1 is full FMRI name in form pkg://publisher/class/name@version for line in lines: name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret def latest_version(*names, **kwargs): ''' The available version of packages in the repository. Accepts full or partial FMRI. 
Partial FMRI is returned if the full FMRI could not be resolved. If the latest version of a given package is already installed, an empty string will be returned for that package. Please use pkg.latest_version as pkg.available_version is being deprecated. .. versionchanged:: 2019.2.0 Support for multiple package names added. CLI Example: .. code-block:: bash salt '*' pkg.latest_version bash salt '*' pkg.latest_version pkg://solaris/entire salt '*' pkg.latest_version postfix sendmail ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hnv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) installed = version(*names) if len(names) == 1: # Convert back our result in a dict if only one name is passed installed = {list(ret)[0] if ret else names[0]: installed} for name in ret: if name not in installed: continue if ret[name] == installed[name]: ret[name] = '' # Append package names which are not found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. 
code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret def normalize_name(name, **kwargs): ''' Internal function. Normalizes pkg name to full FMRI before running pkg.install. In case of multiple matches or no match, it returns the name without modifications. CLI Example: .. code-block:: bash salt '*' pkg.normalize_name vim ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() # if we get more lines, it's multiple match (name not unique) # if we get zero lines, pkg is not installed # in both ways it's safer to return original (unmodified) name and let "pkg install" to deal with it if len(lines) != 1: return name # return pkg name return _ips_get_pkgname(lines[0]) def is_installed(name, **kwargs): ''' Returns True if the package is installed. Otherwise returns False. Name can be full or partial FMRI. In case of multiple match from partial FMRI name, it returns True. CLI Example: .. code-block:: bash salt '*' pkg.is_installed bash ''' cmd = ['/bin/pkg', 'list', '-Hv', name] return __salt__['cmd.retcode'](cmd) == 0 def search(name, versions_as_list=False, **kwargs): ''' Searches the repository for given pkg name. The name can be full or partial FMRI. All matches are printed. Globs are also supported. CLI Example: .. 
code-block:: bash salt '*' pkg.search bash ''' ret = {} cmd = ['/bin/pkg', 'list', '-aHv', name] out = __salt__['cmd.run_all'](cmd, ignore_retcode=True) if out['retcode'] != 0: # error = nothing found return {} # no error, processing pkg listing # column 1 is full FMRI name in form pkg://publisher/pkg/name@version for line in out['stdout'].splitlines(): name = _ips_get_pkgname(line) version = _ips_get_pkgversion(line) __salt__['pkg_resource.add_pkg'](ret, name, version) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named package using the IPS pkg command. Accepts full or partial FMRI. Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} Multiple Package Installation Options: pkgs A list of packages to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install vim salt '*' pkg.install pkg://solaris/editor/vim salt '*' pkg.install pkg://solaris/editor/vim refresh=True salt '*' pkg.install pkgs='["foo", "bar"]' ''' if not pkgs: if is_installed(name): return {} if refresh: refresh_db(full=True) pkg2inst = '' if pkgs: # multiple packages specified pkg2inst = [] for pkg in pkgs: if getattr(pkg, 'items', False): if list(pkg.items())[0][1]: # version specified pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0], list(pkg.items())[0][1])) else: pkg2inst.append(list(pkg.items())[0][0]) else: pkg2inst.append("{0}".format(pkg)) log.debug('Installing these packages instead of %s: %s', name, pkg2inst) else: # install single package if version: pkg2inst = "{0}@{1}".format(name, version) else: pkg2inst = "{0}".format(name) cmd = ['pkg', 'install', '-v', '--accept'] if test: cmd.append('-n') # Get a list of the packages before install so we can diff after to see # what got installed. 
old = list_pkgs() # Install or upgrade the package # If package is already installed if isinstance(pkg2inst, string_types): cmd.append(pkg2inst) elif isinstance(pkg2inst, list): cmd = cmd + pkg2inst out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') # Get a list of the packages again, including newly installed ones. __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError( 'Error occurred installing package(s)', info={ 'changes': ret, 'retcode': ips_pkg_return_values[out['retcode']], 'errors': [out['stderr']] } ) # No error occurred if test: return 'Test succeeded.' return ret def purge(name, **kwargs): ''' Remove specified package. Accepts full or partial FMRI. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> ''' return remove(name, **kwargs)
saltstack/salt
salt/pillar/cmd_json.py
ext_pillar
python
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               command):
    '''
    Execute a command and read the output as JSON.

    minion_id
        The id of the requesting minion; substituted for any ``%s``
        placeholder in ``command``.
    pillar
        The existing pillar data (unused by this external pillar).
    command
        The command whose stdout is parsed as a JSON document.

    Returns the parsed JSON data, or an empty dict when the command fails
    or its output cannot be parsed (the failure is logged at critical).
    '''
    try:
        # Rebind ``command`` so the except-branch logs the rendered form.
        command = command.replace('%s', minion_id)
        raw_output = __salt__['cmd.run'](command)
        return salt.utils.json.loads(raw_output)
    except Exception:
        # Best-effort by design: a broken external command must not crash
        # pillar compilation, so swallow and return no data.
        log.critical('JSON data from %s failed to parse', command)
        return {}
Execute a command and read the output as JSON
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/cmd_json.py#L20-L31
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n" ]
# -*- coding: utf-8 -*- ''' Execute a command and read the output as JSON. The JSON data is then directly overlaid onto the minion's Pillar data. ''' from __future__ import absolute_import, print_function, unicode_literals # Don't "fix" the above docstring to put it on two lines, as the sphinx # autosummary pulls only the first line for its description. # Import Python libs import logging # Import Salt libs import salt.utils.json # Set up logging log = logging.getLogger(__name__)
saltstack/salt
salt/modules/mac_brew_pkg.py
_tap
python
def _tap(tap, runas=None):
    '''
    Add an unofficial GitHub repo (tap) to the list of formulas that brew
    tracks, updates, and installs from.

    Returns True when the tap is already present or was added successfully,
    False when the ``brew tap`` command failed.

    NOTE(review): ``runas`` is accepted but never used here; _call_brew
    resolves the user itself — confirm before relying on this parameter.
    '''
    if tap in _list_taps():
        # Already tapped; nothing to do.
        return True

    try:
        _call_brew('tap {0}'.format(tap))
    except CommandExecutionError:
        log.error('Failed to tap "%s"', tap)
        return False
    return True
Add unofficial GitHub repos to the list of formulas that brew tracks, updates, and installs from.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_brew_pkg.py#L55-L70
[ "def _list_taps():\n '''\n List currently installed brew taps\n '''\n cmd = 'tap'\n return _call_brew(cmd)['stdout'].splitlines()\n", "def _call_brew(cmd, failhard=True):\n '''\n Calls the brew command with the user account of brew\n '''\n user = __salt__['file.get_user'](_homebrew_bin())\n runas = user if user != __opts__['user'] else None\n cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)\n result = __salt__['cmd.run_all'](cmd,\n runas=runas,\n output_loglevel='trace',\n python_shell=False)\n if failhard and result['retcode'] != 0:\n raise CommandExecutionError('Brew command failed',\n info={'result': result})\n return result\n" ]
# -*- coding: utf-8 -*- ''' Homebrew for macOS .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import functools import logging # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.json import salt.utils.path import salt.utils.pkg import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError # Import third party libs from salt.ext import six from salt.ext.six.moves import zip log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Confine this module to Mac OS with Homebrew. ''' if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return (False, 'The brew module could not be loaded: brew not found or grain os != MacOS') def _list_taps(): ''' List currently installed brew taps ''' cmd = 'tap' return _call_brew(cmd)['stdout'].splitlines() def _homebrew_bin(): ''' Returns the full path to the homebrew binary in the PATH ''' ret = __salt__['cmd.run']('brew --prefix', output_loglevel='trace') ret += '/bin/brew' return ret def _call_brew(cmd, failhard=True): ''' Calls the brew command with the user account of brew ''' user = __salt__['file.get_user'](_homebrew_bin()) runas = user if user != __opts__['user'] else None cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd) result = __salt__['cmd.run_all'](cmd, runas=runas, output_loglevel='trace', python_shell=False) if failhard and result['retcode'] != 0: raise CommandExecutionError('Brew command failed', info={'result': result}) return result def list_pkgs(versions_as_list=False, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': 
'<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' versions_as_list = salt.utils.data.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = 'info --json=v1 --installed' package_info = salt.utils.json.loads(_call_brew(cmd)['stdout']) for package in package_info: # Brew allows multiple versions of the same package to be installed. # Salt allows for this, so it must be accounted for. versions = [v['version'] for v in package['installed']] # Brew allows for aliasing of packages, all of which will be # installable from a Salt call, so all names must be accounted for. names = package['aliases'] + [package['name'], package['full_name']] # Create a list of tuples containing all possible combinations of # names and versions, because all are valid. combinations = [(n, v) for n in names for v in versions] for name, version in combinations: __salt__['pkg_resource.add_pkg'](ret, name, version) # Grab packages from brew cask, if available. # Brew Cask doesn't provide a JSON interface, must be parsed the old way. 
try: cask_cmd = 'cask list --versions' out = _call_brew(cask_cmd)['stdout'] for line in out.splitlines(): try: name_and_versions = line.split(' ') name = '/'.join(('caskroom/cask', name_and_versions[0])) installed_versions = name_and_versions[1:] key_func = functools.cmp_to_key(salt.utils.versions.version_cmp) newest_version = sorted(installed_versions, key=key_func).pop() except ValueError: continue __salt__['pkg_resource.add_pkg'](ret, name, newest_version) except CommandExecutionError: pass __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ''' return __salt__['pkg_resource.version'](*names, **kwargs) def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation Currently chooses stable versions, falling back to devel if that does not exist. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ''' refresh = salt.utils.data.is_true(kwargs.pop('refresh', True)) if refresh: refresh_db() def get_version(pkg_info): # Perhaps this will need an option to pick devel by default return pkg_info['versions']['stable'] or pkg_info['versions']['devel'] versions_dict = dict((key, get_version(val)) for key, val in six.iteritems(_info(*names))) if len(names) == 1: return next(six.itervalues(versions_dict)) else: return versions_dict # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def remove(name=None, pkgs=None, **kwargs): ''' Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, **kwargs )[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = 'uninstall {0}'.format(' '.join(targets)) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret def refresh_db(**kwargs): ''' Update the homebrew package repository. CLI Example: .. 
code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) cmd = 'update' if _call_brew(cmd)['retcode']: log.error('Failed to update') return False return True def _info(*pkgs): ''' Get all info brew can provide about a list of packages. Does not do any kind of processing, so the format depends entirely on the output brew gives. This may change if a new version of the format is requested. On failure, returns an empty dict and logs failure. On success, returns a dict mapping each item in pkgs to its corresponding object in the output of 'brew info'. Caveat: If one of the packages does not exist, no packages will be included in the output. ''' cmd = 'info --json=v1 {0}'.format(' '.join(pkgs)) brew_result = _call_brew(cmd) if brew_result['retcode']: log.error('Failed to get info about packages: %s', ' '.join(pkgs)) return {} output = salt.utils.json.loads(brew_result['stdout']) return dict(zip(pkgs, output)) def install(name=None, pkgs=None, taps=None, options=None, **kwargs): ''' Install the passed package(s) with ``brew install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> taps Unofficial GitHub repos to use when updating and installing formulas. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install zlib taps='homebrew/dupes' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options Options to pass to brew. Only applies to initial install. Due to how brew works, modifying chosen options requires a full uninstall followed by a fresh install. Note that if "pkgs" is used, all options will be passed to all packages. Unrecognized options for a package will be silently ignored by brew. CLI Example: .. 
code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options='["--with-fpm"]' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install 'package package package' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, kwargs.get('sources', {}) ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} formulas = ' '.join(pkg_params) old = list_pkgs() # Ensure we've tapped the repo if necessary if taps: if not isinstance(taps, list): # Feels like there is a better way to allow for tap being # specified as both a string and a list taps = [taps] for tap in taps: _tap(tap) if options: cmd = 'install {0} {1}'.format(formulas, ' '.join(options)) else: cmd = 'install {0}'.format(formulas) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Check whether or not an upgrade is available for all packages CLI Example: .. 
code-block:: bash salt '*' pkg.list_upgrades ''' if refresh: refresh_db() res = _call_brew('outdated --json=v1') ret = {} try: data = salt.utils.json.loads(res['stdout']) except ValueError as err: msg = 'unable to interpret output from "brew outdated": {0}'.format(err) log.error(msg) raise CommandExecutionError(msg) for pkg in data: # current means latest available to brew ret[pkg['name']] = pkg['current_version'] return ret def upgrade_available(pkg, **kwargs): ''' Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return pkg in list_upgrades() def upgrade(refresh=True, **kwargs): ''' Upgrade outdated, unpinned brews. refresh Fetch the newest version of Homebrew and all formulae from GitHub before installing. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': '', } old = list_pkgs() if salt.utils.data.is_true(refresh): refresh_db() result = _call_brew('upgrade', failhard=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret def info_installed(*names, **kwargs): ''' Return the information of the named package(s) installed on the system. .. versionadded:: 2016.3.1 names The names of the packages for which to return information. CLI example: .. code-block:: bash salt '*' pkg.info_installed <package1> salt '*' pkg.info_installed <package1> <package2> <package3> ... ''' return _info(*names)
saltstack/salt
salt/modules/mac_brew_pkg.py
_call_brew
python
def _call_brew(cmd, failhard=True):
    '''
    Calls the brew command with the user account of brew
    '''
    # Run as the account that owns the Homebrew install, unless that is
    # already the account the minion is running under.
    brew_owner = __salt__['file.get_user'](_homebrew_bin())
    run_user = None
    if brew_owner != __opts__['user']:
        run_user = brew_owner

    full_cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)
    result = __salt__['cmd.run_all'](full_cmd,
                                     runas=run_user,
                                     output_loglevel='trace',
                                     python_shell=False)

    # Optionally turn a nonzero exit into a hard error for callers.
    if failhard and result['retcode'] != 0:
        raise CommandExecutionError('Brew command failed',
                                    info={'result': result})
    return result
Calls the brew command with the user account of brew
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_brew_pkg.py#L82-L96
[ "def _homebrew_bin():\n '''\n Returns the full path to the homebrew binary in the PATH\n '''\n ret = __salt__['cmd.run']('brew --prefix', output_loglevel='trace')\n ret += '/bin/brew'\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Homebrew for macOS .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import functools import logging # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.json import salt.utils.path import salt.utils.pkg import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError # Import third party libs from salt.ext import six from salt.ext.six.moves import zip log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Confine this module to Mac OS with Homebrew. ''' if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return (False, 'The brew module could not be loaded: brew not found or grain os != MacOS') def _list_taps(): ''' List currently installed brew taps ''' cmd = 'tap' return _call_brew(cmd)['stdout'].splitlines() def _tap(tap, runas=None): ''' Add unofficial GitHub repos to the list of formulas that brew tracks, updates, and installs from. ''' if tap in _list_taps(): return True cmd = 'tap {0}'.format(tap) try: _call_brew(cmd) except CommandExecutionError: log.error('Failed to tap "%s"', tap) return False return True def _homebrew_bin(): ''' Returns the full path to the homebrew binary in the PATH ''' ret = __salt__['cmd.run']('brew --prefix', output_loglevel='trace') ret += '/bin/brew' return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} CLI Example: .. 
code-block:: bash salt '*' pkg.list_pkgs ''' versions_as_list = salt.utils.data.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = 'info --json=v1 --installed' package_info = salt.utils.json.loads(_call_brew(cmd)['stdout']) for package in package_info: # Brew allows multiple versions of the same package to be installed. # Salt allows for this, so it must be accounted for. versions = [v['version'] for v in package['installed']] # Brew allows for aliasing of packages, all of which will be # installable from a Salt call, so all names must be accounted for. names = package['aliases'] + [package['name'], package['full_name']] # Create a list of tuples containing all possible combinations of # names and versions, because all are valid. combinations = [(n, v) for n in names for v in versions] for name, version in combinations: __salt__['pkg_resource.add_pkg'](ret, name, version) # Grab packages from brew cask, if available. # Brew Cask doesn't provide a JSON interface, must be parsed the old way. 
try: cask_cmd = 'cask list --versions' out = _call_brew(cask_cmd)['stdout'] for line in out.splitlines(): try: name_and_versions = line.split(' ') name = '/'.join(('caskroom/cask', name_and_versions[0])) installed_versions = name_and_versions[1:] key_func = functools.cmp_to_key(salt.utils.versions.version_cmp) newest_version = sorted(installed_versions, key=key_func).pop() except ValueError: continue __salt__['pkg_resource.add_pkg'](ret, name, newest_version) except CommandExecutionError: pass __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ''' return __salt__['pkg_resource.version'](*names, **kwargs) def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation Currently chooses stable versions, falling back to devel if that does not exist. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ''' refresh = salt.utils.data.is_true(kwargs.pop('refresh', True)) if refresh: refresh_db() def get_version(pkg_info): # Perhaps this will need an option to pick devel by default return pkg_info['versions']['stable'] or pkg_info['versions']['devel'] versions_dict = dict((key, get_version(val)) for key, val in six.iteritems(_info(*names))) if len(names) == 1: return next(six.itervalues(versions_dict)) else: return versions_dict # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def remove(name=None, pkgs=None, **kwargs): ''' Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, **kwargs )[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = 'uninstall {0}'.format(' '.join(targets)) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret def refresh_db(**kwargs): ''' Update the homebrew package repository. CLI Example: .. 
code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) cmd = 'update' if _call_brew(cmd)['retcode']: log.error('Failed to update') return False return True def _info(*pkgs): ''' Get all info brew can provide about a list of packages. Does not do any kind of processing, so the format depends entirely on the output brew gives. This may change if a new version of the format is requested. On failure, returns an empty dict and logs failure. On success, returns a dict mapping each item in pkgs to its corresponding object in the output of 'brew info'. Caveat: If one of the packages does not exist, no packages will be included in the output. ''' cmd = 'info --json=v1 {0}'.format(' '.join(pkgs)) brew_result = _call_brew(cmd) if brew_result['retcode']: log.error('Failed to get info about packages: %s', ' '.join(pkgs)) return {} output = salt.utils.json.loads(brew_result['stdout']) return dict(zip(pkgs, output)) def install(name=None, pkgs=None, taps=None, options=None, **kwargs): ''' Install the passed package(s) with ``brew install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> taps Unofficial GitHub repos to use when updating and installing formulas. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install zlib taps='homebrew/dupes' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options Options to pass to brew. Only applies to initial install. Due to how brew works, modifying chosen options requires a full uninstall followed by a fresh install. Note that if "pkgs" is used, all options will be passed to all packages. Unrecognized options for a package will be silently ignored by brew. CLI Example: .. 
code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options='["--with-fpm"]' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install 'package package package' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, kwargs.get('sources', {}) ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} formulas = ' '.join(pkg_params) old = list_pkgs() # Ensure we've tapped the repo if necessary if taps: if not isinstance(taps, list): # Feels like there is a better way to allow for tap being # specified as both a string and a list taps = [taps] for tap in taps: _tap(tap) if options: cmd = 'install {0} {1}'.format(formulas, ' '.join(options)) else: cmd = 'install {0}'.format(formulas) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Check whether or not an upgrade is available for all packages CLI Example: .. 
code-block:: bash salt '*' pkg.list_upgrades ''' if refresh: refresh_db() res = _call_brew('outdated --json=v1') ret = {} try: data = salt.utils.json.loads(res['stdout']) except ValueError as err: msg = 'unable to interpret output from "brew outdated": {0}'.format(err) log.error(msg) raise CommandExecutionError(msg) for pkg in data: # current means latest available to brew ret[pkg['name']] = pkg['current_version'] return ret def upgrade_available(pkg, **kwargs): ''' Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return pkg in list_upgrades() def upgrade(refresh=True, **kwargs): ''' Upgrade outdated, unpinned brews. refresh Fetch the newest version of Homebrew and all formulae from GitHub before installing. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': '', } old = list_pkgs() if salt.utils.data.is_true(refresh): refresh_db() result = _call_brew('upgrade', failhard=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret def info_installed(*names, **kwargs): ''' Return the information of the named package(s) installed on the system. .. versionadded:: 2016.3.1 names The names of the packages for which to return information. CLI example: .. code-block:: bash salt '*' pkg.info_installed <package1> salt '*' pkg.info_installed <package1> <package2> <package3> ... ''' return _info(*names)
saltstack/salt
salt/modules/mac_brew_pkg.py
list_pkgs
python
def list_pkgs(versions_as_list=False, **kwargs):
    '''
    List the packages currently installed in a dict::

        {'<package_name>': '<version>'}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_pkgs
    '''
    versions_as_list = salt.utils.data.is_true(versions_as_list)
    # 'removed' and 'purge_desired' are not yet implemented or not applicable
    for unsupported in ('removed', 'purge_desired'):
        if salt.utils.data.is_true(kwargs.get(unsupported)):
            return {}

    # Serve from the per-invocation cache when one exists.
    if 'pkg.list_pkgs' in __context__:
        if versions_as_list:
            return __context__['pkg.list_pkgs']
        cached = copy.deepcopy(__context__['pkg.list_pkgs'])
        __salt__['pkg_resource.stringify'](cached)
        return cached

    pkg_map = {}
    brew_info = salt.utils.json.loads(
        _call_brew('info --json=v1 --installed')['stdout'])

    for entry in brew_info:
        # Brew allows several versions of one formula to be installed
        # at the same time, so collect all of them.
        installed = [item['version'] for item in entry['installed']]
        # Every alias is installable via a Salt call, so register the
        # package under each of its names.
        all_names = entry['aliases'] + [entry['name'], entry['full_name']]
        for pkg_name in all_names:
            for pkg_version in installed:
                __salt__['pkg_resource.add_pkg'](pkg_map, pkg_name, pkg_version)

    # Brew Cask has no JSON interface, so parse its plain-text output.
    try:
        cask_out = _call_brew('cask list --versions')['stdout']
        for cask_line in cask_out.splitlines():
            try:
                fields = cask_line.split(' ')
                cask_name = '/'.join(('caskroom/cask', fields[0]))
                cask_versions = fields[1:]
                cmp_key = functools.cmp_to_key(salt.utils.versions.version_cmp)
                latest = sorted(cask_versions, key=cmp_key).pop()
            except ValueError:
                continue
            __salt__['pkg_resource.add_pkg'](pkg_map, cask_name, latest)
    except CommandExecutionError:
        pass

    __salt__['pkg_resource.sort_pkglist'](pkg_map)
    __context__['pkg.list_pkgs'] = copy.deepcopy(pkg_map)
    if not versions_as_list:
        __salt__['pkg_resource.stringify'](pkg_map)
    return pkg_map
List the packages currently installed in a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_brew_pkg.py#L99-L166
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n", "def is_true(value=None):\n '''\n Returns a boolean value representing the \"truth\" of the value passed. The\n rules for what is a \"True\" value are:\n\n 1. Integer/float values greater than 0\n 2. The string values \"True\" and \"true\"\n 3. Any object for which bool(obj) returns True\n '''\n # First, try int/float conversion\n try:\n value = int(value)\n except (ValueError, TypeError):\n pass\n try:\n value = float(value)\n except (ValueError, TypeError):\n pass\n\n # Now check for truthiness\n if isinstance(value, (six.integer_types, float)):\n return value > 0\n elif isinstance(value, six.string_types):\n return six.text_type(value).lower() == 'true'\n else:\n return bool(value)\n", "def _call_brew(cmd, failhard=True):\n '''\n Calls the brew command with the user account of brew\n '''\n user = __salt__['file.get_user'](_homebrew_bin())\n runas = user if user != __opts__['user'] else None\n cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)\n result = __salt__['cmd.run_all'](cmd,\n runas=runas,\n output_loglevel='trace',\n python_shell=False)\n if failhard and result['retcode'] != 0:\n raise CommandExecutionError('Brew command failed',\n info={'result': result})\n return result\n" ]
# -*- coding: utf-8 -*- ''' Homebrew for macOS .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import functools import logging # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.json import salt.utils.path import salt.utils.pkg import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError # Import third party libs from salt.ext import six from salt.ext.six.moves import zip log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Confine this module to Mac OS with Homebrew. ''' if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return (False, 'The brew module could not be loaded: brew not found or grain os != MacOS') def _list_taps(): ''' List currently installed brew taps ''' cmd = 'tap' return _call_brew(cmd)['stdout'].splitlines() def _tap(tap, runas=None): ''' Add unofficial GitHub repos to the list of formulas that brew tracks, updates, and installs from. 
''' if tap in _list_taps(): return True cmd = 'tap {0}'.format(tap) try: _call_brew(cmd) except CommandExecutionError: log.error('Failed to tap "%s"', tap) return False return True def _homebrew_bin(): ''' Returns the full path to the homebrew binary in the PATH ''' ret = __salt__['cmd.run']('brew --prefix', output_loglevel='trace') ret += '/bin/brew' return ret def _call_brew(cmd, failhard=True): ''' Calls the brew command with the user account of brew ''' user = __salt__['file.get_user'](_homebrew_bin()) runas = user if user != __opts__['user'] else None cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd) result = __salt__['cmd.run_all'](cmd, runas=runas, output_loglevel='trace', python_shell=False) if failhard and result['retcode'] != 0: raise CommandExecutionError('Brew command failed', info={'result': result}) return result def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ''' return __salt__['pkg_resource.version'](*names, **kwargs) def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation Currently chooses stable versions, falling back to devel if that does not exist. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ''' refresh = salt.utils.data.is_true(kwargs.pop('refresh', True)) if refresh: refresh_db() def get_version(pkg_info): # Perhaps this will need an option to pick devel by default return pkg_info['versions']['stable'] or pkg_info['versions']['devel'] versions_dict = dict((key, get_version(val)) for key, val in six.iteritems(_info(*names))) if len(names) == 1: return next(six.itervalues(versions_dict)) else: return versions_dict # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def remove(name=None, pkgs=None, **kwargs): ''' Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, **kwargs )[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = 'uninstall {0}'.format(' '.join(targets)) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret def refresh_db(**kwargs): ''' Update the homebrew package repository. CLI Example: .. 
code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) cmd = 'update' if _call_brew(cmd)['retcode']: log.error('Failed to update') return False return True def _info(*pkgs): ''' Get all info brew can provide about a list of packages. Does not do any kind of processing, so the format depends entirely on the output brew gives. This may change if a new version of the format is requested. On failure, returns an empty dict and logs failure. On success, returns a dict mapping each item in pkgs to its corresponding object in the output of 'brew info'. Caveat: If one of the packages does not exist, no packages will be included in the output. ''' cmd = 'info --json=v1 {0}'.format(' '.join(pkgs)) brew_result = _call_brew(cmd) if brew_result['retcode']: log.error('Failed to get info about packages: %s', ' '.join(pkgs)) return {} output = salt.utils.json.loads(brew_result['stdout']) return dict(zip(pkgs, output)) def install(name=None, pkgs=None, taps=None, options=None, **kwargs): ''' Install the passed package(s) with ``brew install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> taps Unofficial GitHub repos to use when updating and installing formulas. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install zlib taps='homebrew/dupes' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options Options to pass to brew. Only applies to initial install. Due to how brew works, modifying chosen options requires a full uninstall followed by a fresh install. Note that if "pkgs" is used, all options will be passed to all packages. Unrecognized options for a package will be silently ignored by brew. CLI Example: .. 
code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options='["--with-fpm"]' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install 'package package package' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, kwargs.get('sources', {}) ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} formulas = ' '.join(pkg_params) old = list_pkgs() # Ensure we've tapped the repo if necessary if taps: if not isinstance(taps, list): # Feels like there is a better way to allow for tap being # specified as both a string and a list taps = [taps] for tap in taps: _tap(tap) if options: cmd = 'install {0} {1}'.format(formulas, ' '.join(options)) else: cmd = 'install {0}'.format(formulas) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Check whether or not an upgrade is available for all packages CLI Example: .. 
code-block:: bash salt '*' pkg.list_upgrades ''' if refresh: refresh_db() res = _call_brew('outdated --json=v1') ret = {} try: data = salt.utils.json.loads(res['stdout']) except ValueError as err: msg = 'unable to interpret output from "brew outdated": {0}'.format(err) log.error(msg) raise CommandExecutionError(msg) for pkg in data: # current means latest available to brew ret[pkg['name']] = pkg['current_version'] return ret def upgrade_available(pkg, **kwargs): ''' Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return pkg in list_upgrades() def upgrade(refresh=True, **kwargs): ''' Upgrade outdated, unpinned brews. refresh Fetch the newest version of Homebrew and all formulae from GitHub before installing. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': '', } old = list_pkgs() if salt.utils.data.is_true(refresh): refresh_db() result = _call_brew('upgrade', failhard=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret def info_installed(*names, **kwargs): ''' Return the information of the named package(s) installed on the system. .. versionadded:: 2016.3.1 names The names of the packages for which to return information. CLI example: .. code-block:: bash salt '*' pkg.info_installed <package1> salt '*' pkg.info_installed <package1> <package2> <package3> ... ''' return _info(*names)
saltstack/salt
salt/modules/mac_brew_pkg.py
remove
python
def remove(name=None, pkgs=None, **kwargs):
    '''
    Removes packages with ``brew uninstall``.

    name
        The name of the package to be deleted.


    Multiple Package Options:

    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.

    .. versionadded:: 0.16.0


    Returns a dict containing the changes.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    try:
        requested = __salt__['pkg_resource.parse_targets'](
            name, pkgs, **kwargs
        )[0]
    except MinionError as exc:
        raise CommandExecutionError(exc)

    before = list_pkgs()
    # Only attempt to uninstall formulas that are actually installed.
    to_remove = [pkg for pkg in requested if pkg in before]
    if not to_remove:
        return {}

    out = _call_brew('uninstall {0}'.format(' '.join(to_remove)))
    errors = []
    if out['retcode'] != 0 and out['stderr']:
        errors.append(out['stderr'])

    # Invalidate the cached package list before recomputing the diff.
    __context__.pop('pkg.list_pkgs', None)
    after = list_pkgs()
    changes = salt.utils.data.compare_dicts(before, after)

    if errors:
        raise CommandExecutionError(
            'Problem encountered removing package(s)',
            info={'errors': errors, 'changes': changes}
        )

    return changes
Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_brew_pkg.py#L220-L276
[ "def list_pkgs(versions_as_list=False, **kwargs):\n '''\n List the packages currently installed in a dict::\n\n {'<package_name>': '<version>'}\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.list_pkgs\n '''\n versions_as_list = salt.utils.data.is_true(versions_as_list)\n # not yet implemented or not applicable\n if any([salt.utils.data.is_true(kwargs.get(x))\n for x in ('removed', 'purge_desired')]):\n return {}\n\n if 'pkg.list_pkgs' in __context__:\n if versions_as_list:\n return __context__['pkg.list_pkgs']\n else:\n ret = copy.deepcopy(__context__['pkg.list_pkgs'])\n __salt__['pkg_resource.stringify'](ret)\n return ret\n\n ret = {}\n cmd = 'info --json=v1 --installed'\n package_info = salt.utils.json.loads(_call_brew(cmd)['stdout'])\n\n for package in package_info:\n # Brew allows multiple versions of the same package to be installed.\n # Salt allows for this, so it must be accounted for.\n versions = [v['version'] for v in package['installed']]\n # Brew allows for aliasing of packages, all of which will be\n # installable from a Salt call, so all names must be accounted for.\n names = package['aliases'] + [package['name'], package['full_name']]\n # Create a list of tuples containing all possible combinations of\n # names and versions, because all are valid.\n combinations = [(n, v) for n in names for v in versions]\n\n for name, version in combinations:\n __salt__['pkg_resource.add_pkg'](ret, name, version)\n\n # Grab packages from brew cask, if available.\n # Brew Cask doesn't provide a JSON interface, must be parsed the old way.\n try:\n cask_cmd = 'cask list --versions'\n out = _call_brew(cask_cmd)['stdout']\n\n for line in out.splitlines():\n try:\n name_and_versions = line.split(' ')\n name = '/'.join(('caskroom/cask', name_and_versions[0]))\n installed_versions = name_and_versions[1:]\n key_func = functools.cmp_to_key(salt.utils.versions.version_cmp)\n newest_version = sorted(installed_versions, key=key_func).pop()\n except ValueError:\n 
continue\n __salt__['pkg_resource.add_pkg'](ret, name, newest_version)\n except CommandExecutionError:\n pass\n\n __salt__['pkg_resource.sort_pkglist'](ret)\n __context__['pkg.list_pkgs'] = copy.deepcopy(ret)\n if not versions_as_list:\n __salt__['pkg_resource.stringify'](ret)\n return ret\n", "def _call_brew(cmd, failhard=True):\n '''\n Calls the brew command with the user account of brew\n '''\n user = __salt__['file.get_user'](_homebrew_bin())\n runas = user if user != __opts__['user'] else None\n cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)\n result = __salt__['cmd.run_all'](cmd,\n runas=runas,\n output_loglevel='trace',\n python_shell=False)\n if failhard and result['retcode'] != 0:\n raise CommandExecutionError('Brew command failed',\n info={'result': result})\n return result\n" ]
# -*- coding: utf-8 -*- ''' Homebrew for macOS .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import functools import logging # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.json import salt.utils.path import salt.utils.pkg import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError # Import third party libs from salt.ext import six from salt.ext.six.moves import zip log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Confine this module to Mac OS with Homebrew. ''' if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return (False, 'The brew module could not be loaded: brew not found or grain os != MacOS') def _list_taps(): ''' List currently installed brew taps ''' cmd = 'tap' return _call_brew(cmd)['stdout'].splitlines() def _tap(tap, runas=None): ''' Add unofficial GitHub repos to the list of formulas that brew tracks, updates, and installs from. 
''' if tap in _list_taps(): return True cmd = 'tap {0}'.format(tap) try: _call_brew(cmd) except CommandExecutionError: log.error('Failed to tap "%s"', tap) return False return True def _homebrew_bin(): ''' Returns the full path to the homebrew binary in the PATH ''' ret = __salt__['cmd.run']('brew --prefix', output_loglevel='trace') ret += '/bin/brew' return ret def _call_brew(cmd, failhard=True): ''' Calls the brew command with the user account of brew ''' user = __salt__['file.get_user'](_homebrew_bin()) runas = user if user != __opts__['user'] else None cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd) result = __salt__['cmd.run_all'](cmd, runas=runas, output_loglevel='trace', python_shell=False) if failhard and result['retcode'] != 0: raise CommandExecutionError('Brew command failed', info={'result': result}) return result def list_pkgs(versions_as_list=False, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' versions_as_list = salt.utils.data.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = 'info --json=v1 --installed' package_info = salt.utils.json.loads(_call_brew(cmd)['stdout']) for package in package_info: # Brew allows multiple versions of the same package to be installed. # Salt allows for this, so it must be accounted for. versions = [v['version'] for v in package['installed']] # Brew allows for aliasing of packages, all of which will be # installable from a Salt call, so all names must be accounted for. 
names = package['aliases'] + [package['name'], package['full_name']] # Create a list of tuples containing all possible combinations of # names and versions, because all are valid. combinations = [(n, v) for n in names for v in versions] for name, version in combinations: __salt__['pkg_resource.add_pkg'](ret, name, version) # Grab packages from brew cask, if available. # Brew Cask doesn't provide a JSON interface, must be parsed the old way. try: cask_cmd = 'cask list --versions' out = _call_brew(cask_cmd)['stdout'] for line in out.splitlines(): try: name_and_versions = line.split(' ') name = '/'.join(('caskroom/cask', name_and_versions[0])) installed_versions = name_and_versions[1:] key_func = functools.cmp_to_key(salt.utils.versions.version_cmp) newest_version = sorted(installed_versions, key=key_func).pop() except ValueError: continue __salt__['pkg_resource.add_pkg'](ret, name, newest_version) except CommandExecutionError: pass __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ''' return __salt__['pkg_resource.version'](*names, **kwargs) def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation Currently chooses stable versions, falling back to devel if that does not exist. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ''' refresh = salt.utils.data.is_true(kwargs.pop('refresh', True)) if refresh: refresh_db() def get_version(pkg_info): # Perhaps this will need an option to pick devel by default return pkg_info['versions']['stable'] or pkg_info['versions']['devel'] versions_dict = dict((key, get_version(val)) for key, val in six.iteritems(_info(*names))) if len(names) == 1: return next(six.itervalues(versions_dict)) else: return versions_dict # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def refresh_db(**kwargs): ''' Update the homebrew package repository. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) cmd = 'update' if _call_brew(cmd)['retcode']: log.error('Failed to update') return False return True def _info(*pkgs): ''' Get all info brew can provide about a list of packages. Does not do any kind of processing, so the format depends entirely on the output brew gives. This may change if a new version of the format is requested. On failure, returns an empty dict and logs failure. On success, returns a dict mapping each item in pkgs to its corresponding object in the output of 'brew info'. Caveat: If one of the packages does not exist, no packages will be included in the output. ''' cmd = 'info --json=v1 {0}'.format(' '.join(pkgs)) brew_result = _call_brew(cmd) if brew_result['retcode']: log.error('Failed to get info about packages: %s', ' '.join(pkgs)) return {} output = salt.utils.json.loads(brew_result['stdout']) return dict(zip(pkgs, output)) def install(name=None, pkgs=None, taps=None, options=None, **kwargs): ''' Install the passed package(s) with ``brew install`` name The name of the formula to be installed. 
Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> taps Unofficial GitHub repos to use when updating and installing formulas. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install zlib taps='homebrew/dupes' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options Options to pass to brew. Only applies to initial install. Due to how brew works, modifying chosen options requires a full uninstall followed by a fresh install. Note that if "pkgs" is used, all options will be passed to all packages. Unrecognized options for a package will be silently ignored by brew. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options='["--with-fpm"]' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. 
code-block:: bash salt '*' pkg.install 'package package package' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, kwargs.get('sources', {}) ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} formulas = ' '.join(pkg_params) old = list_pkgs() # Ensure we've tapped the repo if necessary if taps: if not isinstance(taps, list): # Feels like there is a better way to allow for tap being # specified as both a string and a list taps = [taps] for tap in taps: _tap(tap) if options: cmd = 'install {0} {1}'.format(formulas, ' '.join(options)) else: cmd = 'install {0}'.format(formulas) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Check whether or not an upgrade is available for all packages CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades ''' if refresh: refresh_db() res = _call_brew('outdated --json=v1') ret = {} try: data = salt.utils.json.loads(res['stdout']) except ValueError as err: msg = 'unable to interpret output from "brew outdated": {0}'.format(err) log.error(msg) raise CommandExecutionError(msg) for pkg in data: # current means latest available to brew ret[pkg['name']] = pkg['current_version'] return ret def upgrade_available(pkg, **kwargs): ''' Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return pkg in list_upgrades() def upgrade(refresh=True, **kwargs): ''' Upgrade outdated, unpinned brews. refresh Fetch the newest version of Homebrew and all formulae from GitHub before installing. 
Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': '', } old = list_pkgs() if salt.utils.data.is_true(refresh): refresh_db() result = _call_brew('upgrade', failhard=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret def info_installed(*names, **kwargs): ''' Return the information of the named package(s) installed on the system. .. versionadded:: 2016.3.1 names The names of the packages for which to return information. CLI example: .. code-block:: bash salt '*' pkg.info_installed <package1> salt '*' pkg.info_installed <package1> <package2> <package3> ... ''' return _info(*names)
saltstack/salt
salt/modules/mac_brew_pkg.py
refresh_db
python
def refresh_db(**kwargs): ''' Update the homebrew package repository. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) cmd = 'update' if _call_brew(cmd)['retcode']: log.error('Failed to update') return False return True
Update the homebrew package repository. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_brew_pkg.py#L279-L296
[ "def clear_rtag(opts):\n '''\n Remove the rtag file\n '''\n try:\n os.remove(rtag(opts))\n except OSError as exc:\n if exc.errno != errno.ENOENT:\n # Using __str__() here to get the fully-formatted error message\n # (error number, error message, path)\n log.warning('Encountered error removing rtag: %s', exc.__str__())\n", "def _call_brew(cmd, failhard=True):\n '''\n Calls the brew command with the user account of brew\n '''\n user = __salt__['file.get_user'](_homebrew_bin())\n runas = user if user != __opts__['user'] else None\n cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)\n result = __salt__['cmd.run_all'](cmd,\n runas=runas,\n output_loglevel='trace',\n python_shell=False)\n if failhard and result['retcode'] != 0:\n raise CommandExecutionError('Brew command failed',\n info={'result': result})\n return result\n" ]
# -*- coding: utf-8 -*- ''' Homebrew for macOS .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import functools import logging # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.json import salt.utils.path import salt.utils.pkg import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError # Import third party libs from salt.ext import six from salt.ext.six.moves import zip log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Confine this module to Mac OS with Homebrew. ''' if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return (False, 'The brew module could not be loaded: brew not found or grain os != MacOS') def _list_taps(): ''' List currently installed brew taps ''' cmd = 'tap' return _call_brew(cmd)['stdout'].splitlines() def _tap(tap, runas=None): ''' Add unofficial GitHub repos to the list of formulas that brew tracks, updates, and installs from. 
''' if tap in _list_taps(): return True cmd = 'tap {0}'.format(tap) try: _call_brew(cmd) except CommandExecutionError: log.error('Failed to tap "%s"', tap) return False return True def _homebrew_bin(): ''' Returns the full path to the homebrew binary in the PATH ''' ret = __salt__['cmd.run']('brew --prefix', output_loglevel='trace') ret += '/bin/brew' return ret def _call_brew(cmd, failhard=True): ''' Calls the brew command with the user account of brew ''' user = __salt__['file.get_user'](_homebrew_bin()) runas = user if user != __opts__['user'] else None cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd) result = __salt__['cmd.run_all'](cmd, runas=runas, output_loglevel='trace', python_shell=False) if failhard and result['retcode'] != 0: raise CommandExecutionError('Brew command failed', info={'result': result}) return result def list_pkgs(versions_as_list=False, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' versions_as_list = salt.utils.data.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = 'info --json=v1 --installed' package_info = salt.utils.json.loads(_call_brew(cmd)['stdout']) for package in package_info: # Brew allows multiple versions of the same package to be installed. # Salt allows for this, so it must be accounted for. versions = [v['version'] for v in package['installed']] # Brew allows for aliasing of packages, all of which will be # installable from a Salt call, so all names must be accounted for. 
names = package['aliases'] + [package['name'], package['full_name']] # Create a list of tuples containing all possible combinations of # names and versions, because all are valid. combinations = [(n, v) for n in names for v in versions] for name, version in combinations: __salt__['pkg_resource.add_pkg'](ret, name, version) # Grab packages from brew cask, if available. # Brew Cask doesn't provide a JSON interface, must be parsed the old way. try: cask_cmd = 'cask list --versions' out = _call_brew(cask_cmd)['stdout'] for line in out.splitlines(): try: name_and_versions = line.split(' ') name = '/'.join(('caskroom/cask', name_and_versions[0])) installed_versions = name_and_versions[1:] key_func = functools.cmp_to_key(salt.utils.versions.version_cmp) newest_version = sorted(installed_versions, key=key_func).pop() except ValueError: continue __salt__['pkg_resource.add_pkg'](ret, name, newest_version) except CommandExecutionError: pass __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ''' return __salt__['pkg_resource.version'](*names, **kwargs) def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation Currently chooses stable versions, falling back to devel if that does not exist. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ''' refresh = salt.utils.data.is_true(kwargs.pop('refresh', True)) if refresh: refresh_db() def get_version(pkg_info): # Perhaps this will need an option to pick devel by default return pkg_info['versions']['stable'] or pkg_info['versions']['devel'] versions_dict = dict((key, get_version(val)) for key, val in six.iteritems(_info(*names))) if len(names) == 1: return next(six.itervalues(versions_dict)) else: return versions_dict # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def remove(name=None, pkgs=None, **kwargs): ''' Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, **kwargs )[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = 'uninstall {0}'.format(' '.join(targets)) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret def _info(*pkgs): ''' Get all info brew can provide about a list of packages. 
Does not do any kind of processing, so the format depends entirely on the output brew gives. This may change if a new version of the format is requested. On failure, returns an empty dict and logs failure. On success, returns a dict mapping each item in pkgs to its corresponding object in the output of 'brew info'. Caveat: If one of the packages does not exist, no packages will be included in the output. ''' cmd = 'info --json=v1 {0}'.format(' '.join(pkgs)) brew_result = _call_brew(cmd) if brew_result['retcode']: log.error('Failed to get info about packages: %s', ' '.join(pkgs)) return {} output = salt.utils.json.loads(brew_result['stdout']) return dict(zip(pkgs, output)) def install(name=None, pkgs=None, taps=None, options=None, **kwargs): ''' Install the passed package(s) with ``brew install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> taps Unofficial GitHub repos to use when updating and installing formulas. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install zlib taps='homebrew/dupes' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options Options to pass to brew. Only applies to initial install. Due to how brew works, modifying chosen options requires a full uninstall followed by a fresh install. Note that if "pkgs" is used, all options will be passed to all packages. Unrecognized options for a package will be silently ignored by brew. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options='["--with-fpm"]' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. 
code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install 'package package package' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, kwargs.get('sources', {}) ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} formulas = ' '.join(pkg_params) old = list_pkgs() # Ensure we've tapped the repo if necessary if taps: if not isinstance(taps, list): # Feels like there is a better way to allow for tap being # specified as both a string and a list taps = [taps] for tap in taps: _tap(tap) if options: cmd = 'install {0} {1}'.format(formulas, ' '.join(options)) else: cmd = 'install {0}'.format(formulas) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Check whether or not an upgrade is available for all packages CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades ''' if refresh: refresh_db() res = _call_brew('outdated --json=v1') ret = {} try: data = salt.utils.json.loads(res['stdout']) except ValueError as err: msg = 'unable to interpret output from "brew outdated": {0}'.format(err) log.error(msg) raise CommandExecutionError(msg) for pkg in data: # current means latest available to brew ret[pkg['name']] = pkg['current_version'] return ret def upgrade_available(pkg, **kwargs): ''' Check whether or not an upgrade is available for a given package CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return pkg in list_upgrades() def upgrade(refresh=True, **kwargs): ''' Upgrade outdated, unpinned brews. refresh Fetch the newest version of Homebrew and all formulae from GitHub before installing. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': '', } old = list_pkgs() if salt.utils.data.is_true(refresh): refresh_db() result = _call_brew('upgrade', failhard=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret def info_installed(*names, **kwargs): ''' Return the information of the named package(s) installed on the system. .. versionadded:: 2016.3.1 names The names of the packages for which to return information. CLI example: .. code-block:: bash salt '*' pkg.info_installed <package1> salt '*' pkg.info_installed <package1> <package2> <package3> ... ''' return _info(*names)
saltstack/salt
salt/modules/mac_brew_pkg.py
_info
python
def _info(*pkgs): ''' Get all info brew can provide about a list of packages. Does not do any kind of processing, so the format depends entirely on the output brew gives. This may change if a new version of the format is requested. On failure, returns an empty dict and logs failure. On success, returns a dict mapping each item in pkgs to its corresponding object in the output of 'brew info'. Caveat: If one of the packages does not exist, no packages will be included in the output. ''' cmd = 'info --json=v1 {0}'.format(' '.join(pkgs)) brew_result = _call_brew(cmd) if brew_result['retcode']: log.error('Failed to get info about packages: %s', ' '.join(pkgs)) return {} output = salt.utils.json.loads(brew_result['stdout']) return dict(zip(pkgs, output))
Get all info brew can provide about a list of packages. Does not do any kind of processing, so the format depends entirely on the output brew gives. This may change if a new version of the format is requested. On failure, returns an empty dict and logs failure. On success, returns a dict mapping each item in pkgs to its corresponding object in the output of 'brew info'. Caveat: If one of the packages does not exist, no packages will be included in the output.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_brew_pkg.py#L299-L321
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n", "def _call_brew(cmd, failhard=True):\n '''\n Calls the brew command with the user account of brew\n '''\n user = __salt__['file.get_user'](_homebrew_bin())\n runas = user if user != __opts__['user'] else None\n cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)\n result = __salt__['cmd.run_all'](cmd,\n runas=runas,\n output_loglevel='trace',\n python_shell=False)\n if failhard and result['retcode'] != 0:\n raise CommandExecutionError('Brew command failed',\n info={'result': result})\n return result\n" ]
# -*- coding: utf-8 -*- ''' Homebrew for macOS .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import functools import logging # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.json import salt.utils.path import salt.utils.pkg import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError # Import third party libs from salt.ext import six from salt.ext.six.moves import zip log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Confine this module to Mac OS with Homebrew. ''' if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return (False, 'The brew module could not be loaded: brew not found or grain os != MacOS') def _list_taps(): ''' List currently installed brew taps ''' cmd = 'tap' return _call_brew(cmd)['stdout'].splitlines() def _tap(tap, runas=None): ''' Add unofficial GitHub repos to the list of formulas that brew tracks, updates, and installs from. 
''' if tap in _list_taps(): return True cmd = 'tap {0}'.format(tap) try: _call_brew(cmd) except CommandExecutionError: log.error('Failed to tap "%s"', tap) return False return True def _homebrew_bin(): ''' Returns the full path to the homebrew binary in the PATH ''' ret = __salt__['cmd.run']('brew --prefix', output_loglevel='trace') ret += '/bin/brew' return ret def _call_brew(cmd, failhard=True): ''' Calls the brew command with the user account of brew ''' user = __salt__['file.get_user'](_homebrew_bin()) runas = user if user != __opts__['user'] else None cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd) result = __salt__['cmd.run_all'](cmd, runas=runas, output_loglevel='trace', python_shell=False) if failhard and result['retcode'] != 0: raise CommandExecutionError('Brew command failed', info={'result': result}) return result def list_pkgs(versions_as_list=False, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' versions_as_list = salt.utils.data.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = 'info --json=v1 --installed' package_info = salt.utils.json.loads(_call_brew(cmd)['stdout']) for package in package_info: # Brew allows multiple versions of the same package to be installed. # Salt allows for this, so it must be accounted for. versions = [v['version'] for v in package['installed']] # Brew allows for aliasing of packages, all of which will be # installable from a Salt call, so all names must be accounted for. 
names = package['aliases'] + [package['name'], package['full_name']] # Create a list of tuples containing all possible combinations of # names and versions, because all are valid. combinations = [(n, v) for n in names for v in versions] for name, version in combinations: __salt__['pkg_resource.add_pkg'](ret, name, version) # Grab packages from brew cask, if available. # Brew Cask doesn't provide a JSON interface, must be parsed the old way. try: cask_cmd = 'cask list --versions' out = _call_brew(cask_cmd)['stdout'] for line in out.splitlines(): try: name_and_versions = line.split(' ') name = '/'.join(('caskroom/cask', name_and_versions[0])) installed_versions = name_and_versions[1:] key_func = functools.cmp_to_key(salt.utils.versions.version_cmp) newest_version = sorted(installed_versions, key=key_func).pop() except ValueError: continue __salt__['pkg_resource.add_pkg'](ret, name, newest_version) except CommandExecutionError: pass __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ''' return __salt__['pkg_resource.version'](*names, **kwargs) def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation Currently chooses stable versions, falling back to devel if that does not exist. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ''' refresh = salt.utils.data.is_true(kwargs.pop('refresh', True)) if refresh: refresh_db() def get_version(pkg_info): # Perhaps this will need an option to pick devel by default return pkg_info['versions']['stable'] or pkg_info['versions']['devel'] versions_dict = dict((key, get_version(val)) for key, val in six.iteritems(_info(*names))) if len(names) == 1: return next(six.itervalues(versions_dict)) else: return versions_dict # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def remove(name=None, pkgs=None, **kwargs): ''' Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, **kwargs )[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = 'uninstall {0}'.format(' '.join(targets)) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret def refresh_db(**kwargs): ''' Update the homebrew package repository. CLI Example: .. 
code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) cmd = 'update' if _call_brew(cmd)['retcode']: log.error('Failed to update') return False return True def install(name=None, pkgs=None, taps=None, options=None, **kwargs): ''' Install the passed package(s) with ``brew install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> taps Unofficial GitHub repos to use when updating and installing formulas. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install zlib taps='homebrew/dupes' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options Options to pass to brew. Only applies to initial install. Due to how brew works, modifying chosen options requires a full uninstall followed by a fresh install. Note that if "pkgs" is used, all options will be passed to all packages. Unrecognized options for a package will be silently ignored by brew. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options='["--with-fpm"]' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. 
code-block:: bash salt '*' pkg.install 'package package package' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, kwargs.get('sources', {}) ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} formulas = ' '.join(pkg_params) old = list_pkgs() # Ensure we've tapped the repo if necessary if taps: if not isinstance(taps, list): # Feels like there is a better way to allow for tap being # specified as both a string and a list taps = [taps] for tap in taps: _tap(tap) if options: cmd = 'install {0} {1}'.format(formulas, ' '.join(options)) else: cmd = 'install {0}'.format(formulas) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Check whether or not an upgrade is available for all packages CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades ''' if refresh: refresh_db() res = _call_brew('outdated --json=v1') ret = {} try: data = salt.utils.json.loads(res['stdout']) except ValueError as err: msg = 'unable to interpret output from "brew outdated": {0}'.format(err) log.error(msg) raise CommandExecutionError(msg) for pkg in data: # current means latest available to brew ret[pkg['name']] = pkg['current_version'] return ret def upgrade_available(pkg, **kwargs): ''' Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return pkg in list_upgrades() def upgrade(refresh=True, **kwargs): ''' Upgrade outdated, unpinned brews. refresh Fetch the newest version of Homebrew and all formulae from GitHub before installing. 
Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': '', } old = list_pkgs() if salt.utils.data.is_true(refresh): refresh_db() result = _call_brew('upgrade', failhard=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret def info_installed(*names, **kwargs): ''' Return the information of the named package(s) installed on the system. .. versionadded:: 2016.3.1 names The names of the packages for which to return information. CLI example: .. code-block:: bash salt '*' pkg.info_installed <package1> salt '*' pkg.info_installed <package1> <package2> <package3> ... ''' return _info(*names)
saltstack/salt
salt/modules/mac_brew_pkg.py
install
python
def install(name=None, pkgs=None, taps=None, options=None, **kwargs): ''' Install the passed package(s) with ``brew install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> taps Unofficial GitHub repos to use when updating and installing formulas. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install zlib taps='homebrew/dupes' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options Options to pass to brew. Only applies to initial install. Due to how brew works, modifying chosen options requires a full uninstall followed by a fresh install. Note that if "pkgs" is used, all options will be passed to all packages. Unrecognized options for a package will be silently ignored by brew. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options='["--with-fpm"]' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. 
code-block:: bash salt '*' pkg.install 'package package package' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, kwargs.get('sources', {}) ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} formulas = ' '.join(pkg_params) old = list_pkgs() # Ensure we've tapped the repo if necessary if taps: if not isinstance(taps, list): # Feels like there is a better way to allow for tap being # specified as both a string and a list taps = [taps] for tap in taps: _tap(tap) if options: cmd = 'install {0} {1}'.format(formulas, ' '.join(options)) else: cmd = 'install {0}'.format(formulas) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret
Install the passed package(s) with ``brew install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> taps Unofficial GitHub repos to use when updating and installing formulas. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install zlib taps='homebrew/dupes' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options Options to pass to brew. Only applies to initial install. Due to how brew works, modifying chosen options requires a full uninstall followed by a fresh install. Note that if "pkgs" is used, all options will be passed to all packages. Unrecognized options for a package will be silently ignored by brew. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options='["--with-fpm"]' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install 'package package package'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_brew_pkg.py#L324-L430
[ "def list_pkgs(versions_as_list=False, **kwargs):\n '''\n List the packages currently installed in a dict::\n\n {'<package_name>': '<version>'}\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.list_pkgs\n '''\n versions_as_list = salt.utils.data.is_true(versions_as_list)\n # not yet implemented or not applicable\n if any([salt.utils.data.is_true(kwargs.get(x))\n for x in ('removed', 'purge_desired')]):\n return {}\n\n if 'pkg.list_pkgs' in __context__:\n if versions_as_list:\n return __context__['pkg.list_pkgs']\n else:\n ret = copy.deepcopy(__context__['pkg.list_pkgs'])\n __salt__['pkg_resource.stringify'](ret)\n return ret\n\n ret = {}\n cmd = 'info --json=v1 --installed'\n package_info = salt.utils.json.loads(_call_brew(cmd)['stdout'])\n\n for package in package_info:\n # Brew allows multiple versions of the same package to be installed.\n # Salt allows for this, so it must be accounted for.\n versions = [v['version'] for v in package['installed']]\n # Brew allows for aliasing of packages, all of which will be\n # installable from a Salt call, so all names must be accounted for.\n names = package['aliases'] + [package['name'], package['full_name']]\n # Create a list of tuples containing all possible combinations of\n # names and versions, because all are valid.\n combinations = [(n, v) for n in names for v in versions]\n\n for name, version in combinations:\n __salt__['pkg_resource.add_pkg'](ret, name, version)\n\n # Grab packages from brew cask, if available.\n # Brew Cask doesn't provide a JSON interface, must be parsed the old way.\n try:\n cask_cmd = 'cask list --versions'\n out = _call_brew(cask_cmd)['stdout']\n\n for line in out.splitlines():\n try:\n name_and_versions = line.split(' ')\n name = '/'.join(('caskroom/cask', name_and_versions[0]))\n installed_versions = name_and_versions[1:]\n key_func = functools.cmp_to_key(salt.utils.versions.version_cmp)\n newest_version = sorted(installed_versions, key=key_func).pop()\n except ValueError:\n 
continue\n __salt__['pkg_resource.add_pkg'](ret, name, newest_version)\n except CommandExecutionError:\n pass\n\n __salt__['pkg_resource.sort_pkglist'](ret)\n __context__['pkg.list_pkgs'] = copy.deepcopy(ret)\n if not versions_as_list:\n __salt__['pkg_resource.stringify'](ret)\n return ret\n", "def _call_brew(cmd, failhard=True):\n '''\n Calls the brew command with the user account of brew\n '''\n user = __salt__['file.get_user'](_homebrew_bin())\n runas = user if user != __opts__['user'] else None\n cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)\n result = __salt__['cmd.run_all'](cmd,\n runas=runas,\n output_loglevel='trace',\n python_shell=False)\n if failhard and result['retcode'] != 0:\n raise CommandExecutionError('Brew command failed',\n info={'result': result})\n return result\n", "def _tap(tap, runas=None):\n '''\n Add unofficial GitHub repos to the list of formulas that brew tracks,\n updates, and installs from.\n '''\n if tap in _list_taps():\n return True\n\n cmd = 'tap {0}'.format(tap)\n try:\n _call_brew(cmd)\n except CommandExecutionError:\n log.error('Failed to tap \"%s\"', tap)\n return False\n\n return True\n" ]
# -*- coding: utf-8 -*- ''' Homebrew for macOS .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import functools import logging # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.json import salt.utils.path import salt.utils.pkg import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError # Import third party libs from salt.ext import six from salt.ext.six.moves import zip log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Confine this module to Mac OS with Homebrew. ''' if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return (False, 'The brew module could not be loaded: brew not found or grain os != MacOS') def _list_taps(): ''' List currently installed brew taps ''' cmd = 'tap' return _call_brew(cmd)['stdout'].splitlines() def _tap(tap, runas=None): ''' Add unofficial GitHub repos to the list of formulas that brew tracks, updates, and installs from. 
''' if tap in _list_taps(): return True cmd = 'tap {0}'.format(tap) try: _call_brew(cmd) except CommandExecutionError: log.error('Failed to tap "%s"', tap) return False return True def _homebrew_bin(): ''' Returns the full path to the homebrew binary in the PATH ''' ret = __salt__['cmd.run']('brew --prefix', output_loglevel='trace') ret += '/bin/brew' return ret def _call_brew(cmd, failhard=True): ''' Calls the brew command with the user account of brew ''' user = __salt__['file.get_user'](_homebrew_bin()) runas = user if user != __opts__['user'] else None cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd) result = __salt__['cmd.run_all'](cmd, runas=runas, output_loglevel='trace', python_shell=False) if failhard and result['retcode'] != 0: raise CommandExecutionError('Brew command failed', info={'result': result}) return result def list_pkgs(versions_as_list=False, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' versions_as_list = salt.utils.data.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = 'info --json=v1 --installed' package_info = salt.utils.json.loads(_call_brew(cmd)['stdout']) for package in package_info: # Brew allows multiple versions of the same package to be installed. # Salt allows for this, so it must be accounted for. versions = [v['version'] for v in package['installed']] # Brew allows for aliasing of packages, all of which will be # installable from a Salt call, so all names must be accounted for. 
names = package['aliases'] + [package['name'], package['full_name']] # Create a list of tuples containing all possible combinations of # names and versions, because all are valid. combinations = [(n, v) for n in names for v in versions] for name, version in combinations: __salt__['pkg_resource.add_pkg'](ret, name, version) # Grab packages from brew cask, if available. # Brew Cask doesn't provide a JSON interface, must be parsed the old way. try: cask_cmd = 'cask list --versions' out = _call_brew(cask_cmd)['stdout'] for line in out.splitlines(): try: name_and_versions = line.split(' ') name = '/'.join(('caskroom/cask', name_and_versions[0])) installed_versions = name_and_versions[1:] key_func = functools.cmp_to_key(salt.utils.versions.version_cmp) newest_version = sorted(installed_versions, key=key_func).pop() except ValueError: continue __salt__['pkg_resource.add_pkg'](ret, name, newest_version) except CommandExecutionError: pass __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ''' return __salt__['pkg_resource.version'](*names, **kwargs) def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation Currently chooses stable versions, falling back to devel if that does not exist. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ''' refresh = salt.utils.data.is_true(kwargs.pop('refresh', True)) if refresh: refresh_db() def get_version(pkg_info): # Perhaps this will need an option to pick devel by default return pkg_info['versions']['stable'] or pkg_info['versions']['devel'] versions_dict = dict((key, get_version(val)) for key, val in six.iteritems(_info(*names))) if len(names) == 1: return next(six.itervalues(versions_dict)) else: return versions_dict # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def remove(name=None, pkgs=None, **kwargs): ''' Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, **kwargs )[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = 'uninstall {0}'.format(' '.join(targets)) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret def refresh_db(**kwargs): ''' Update the homebrew package repository. CLI Example: .. 
code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) cmd = 'update' if _call_brew(cmd)['retcode']: log.error('Failed to update') return False return True def _info(*pkgs): ''' Get all info brew can provide about a list of packages. Does not do any kind of processing, so the format depends entirely on the output brew gives. This may change if a new version of the format is requested. On failure, returns an empty dict and logs failure. On success, returns a dict mapping each item in pkgs to its corresponding object in the output of 'brew info'. Caveat: If one of the packages does not exist, no packages will be included in the output. ''' cmd = 'info --json=v1 {0}'.format(' '.join(pkgs)) brew_result = _call_brew(cmd) if brew_result['retcode']: log.error('Failed to get info about packages: %s', ' '.join(pkgs)) return {} output = salt.utils.json.loads(brew_result['stdout']) return dict(zip(pkgs, output)) def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Check whether or not an upgrade is available for all packages CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades ''' if refresh: refresh_db() res = _call_brew('outdated --json=v1') ret = {} try: data = salt.utils.json.loads(res['stdout']) except ValueError as err: msg = 'unable to interpret output from "brew outdated": {0}'.format(err) log.error(msg) raise CommandExecutionError(msg) for pkg in data: # current means latest available to brew ret[pkg['name']] = pkg['current_version'] return ret def upgrade_available(pkg, **kwargs): ''' Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return pkg in list_upgrades() def upgrade(refresh=True, **kwargs): ''' Upgrade outdated, unpinned brews. refresh Fetch the newest version of Homebrew and all formulae from GitHub before installing. 
Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': '', } old = list_pkgs() if salt.utils.data.is_true(refresh): refresh_db() result = _call_brew('upgrade', failhard=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret def info_installed(*names, **kwargs): ''' Return the information of the named package(s) installed on the system. .. versionadded:: 2016.3.1 names The names of the packages for which to return information. CLI example: .. code-block:: bash salt '*' pkg.info_installed <package1> salt '*' pkg.info_installed <package1> <package2> <package3> ... ''' return _info(*names)
saltstack/salt
salt/modules/mac_brew_pkg.py
list_upgrades
python
def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Check whether or not an upgrade is available for all packages CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades ''' if refresh: refresh_db() res = _call_brew('outdated --json=v1') ret = {} try: data = salt.utils.json.loads(res['stdout']) except ValueError as err: msg = 'unable to interpret output from "brew outdated": {0}'.format(err) log.error(msg) raise CommandExecutionError(msg) for pkg in data: # current means latest available to brew ret[pkg['name']] = pkg['current_version'] return ret
Check whether or not an upgrade is available for all packages CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_brew_pkg.py#L433-L459
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n", "def refresh_db(**kwargs):\n '''\n Update the homebrew package repository.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.refresh_db\n '''\n # Remove rtag file to keep multiple refreshes from happening in pkg states\n salt.utils.pkg.clear_rtag(__opts__)\n cmd = 'update'\n if _call_brew(cmd)['retcode']:\n log.error('Failed to update')\n return False\n\n return True\n", "def _call_brew(cmd, failhard=True):\n '''\n Calls the brew command with the user account of brew\n '''\n user = __salt__['file.get_user'](_homebrew_bin())\n runas = user if user != __opts__['user'] else None\n cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)\n result = __salt__['cmd.run_all'](cmd,\n runas=runas,\n output_loglevel='trace',\n python_shell=False)\n if failhard and result['retcode'] != 0:\n raise CommandExecutionError('Brew command failed',\n info={'result': result})\n return result\n" ]
# -*- coding: utf-8 -*- ''' Homebrew for macOS .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import functools import logging # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.json import salt.utils.path import salt.utils.pkg import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError # Import third party libs from salt.ext import six from salt.ext.six.moves import zip log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Confine this module to Mac OS with Homebrew. ''' if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return (False, 'The brew module could not be loaded: brew not found or grain os != MacOS') def _list_taps(): ''' List currently installed brew taps ''' cmd = 'tap' return _call_brew(cmd)['stdout'].splitlines() def _tap(tap, runas=None): ''' Add unofficial GitHub repos to the list of formulas that brew tracks, updates, and installs from. 
''' if tap in _list_taps(): return True cmd = 'tap {0}'.format(tap) try: _call_brew(cmd) except CommandExecutionError: log.error('Failed to tap "%s"', tap) return False return True def _homebrew_bin(): ''' Returns the full path to the homebrew binary in the PATH ''' ret = __salt__['cmd.run']('brew --prefix', output_loglevel='trace') ret += '/bin/brew' return ret def _call_brew(cmd, failhard=True): ''' Calls the brew command with the user account of brew ''' user = __salt__['file.get_user'](_homebrew_bin()) runas = user if user != __opts__['user'] else None cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd) result = __salt__['cmd.run_all'](cmd, runas=runas, output_loglevel='trace', python_shell=False) if failhard and result['retcode'] != 0: raise CommandExecutionError('Brew command failed', info={'result': result}) return result def list_pkgs(versions_as_list=False, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' versions_as_list = salt.utils.data.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = 'info --json=v1 --installed' package_info = salt.utils.json.loads(_call_brew(cmd)['stdout']) for package in package_info: # Brew allows multiple versions of the same package to be installed. # Salt allows for this, so it must be accounted for. versions = [v['version'] for v in package['installed']] # Brew allows for aliasing of packages, all of which will be # installable from a Salt call, so all names must be accounted for. 
names = package['aliases'] + [package['name'], package['full_name']] # Create a list of tuples containing all possible combinations of # names and versions, because all are valid. combinations = [(n, v) for n in names for v in versions] for name, version in combinations: __salt__['pkg_resource.add_pkg'](ret, name, version) # Grab packages from brew cask, if available. # Brew Cask doesn't provide a JSON interface, must be parsed the old way. try: cask_cmd = 'cask list --versions' out = _call_brew(cask_cmd)['stdout'] for line in out.splitlines(): try: name_and_versions = line.split(' ') name = '/'.join(('caskroom/cask', name_and_versions[0])) installed_versions = name_and_versions[1:] key_func = functools.cmp_to_key(salt.utils.versions.version_cmp) newest_version = sorted(installed_versions, key=key_func).pop() except ValueError: continue __salt__['pkg_resource.add_pkg'](ret, name, newest_version) except CommandExecutionError: pass __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ''' return __salt__['pkg_resource.version'](*names, **kwargs) def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation Currently chooses stable versions, falling back to devel if that does not exist. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ''' refresh = salt.utils.data.is_true(kwargs.pop('refresh', True)) if refresh: refresh_db() def get_version(pkg_info): # Perhaps this will need an option to pick devel by default return pkg_info['versions']['stable'] or pkg_info['versions']['devel'] versions_dict = dict((key, get_version(val)) for key, val in six.iteritems(_info(*names))) if len(names) == 1: return next(six.itervalues(versions_dict)) else: return versions_dict # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def remove(name=None, pkgs=None, **kwargs): ''' Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, **kwargs )[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = 'uninstall {0}'.format(' '.join(targets)) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret def refresh_db(**kwargs): ''' Update the homebrew package repository. CLI Example: .. 
code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) cmd = 'update' if _call_brew(cmd)['retcode']: log.error('Failed to update') return False return True def _info(*pkgs): ''' Get all info brew can provide about a list of packages. Does not do any kind of processing, so the format depends entirely on the output brew gives. This may change if a new version of the format is requested. On failure, returns an empty dict and logs failure. On success, returns a dict mapping each item in pkgs to its corresponding object in the output of 'brew info'. Caveat: If one of the packages does not exist, no packages will be included in the output. ''' cmd = 'info --json=v1 {0}'.format(' '.join(pkgs)) brew_result = _call_brew(cmd) if brew_result['retcode']: log.error('Failed to get info about packages: %s', ' '.join(pkgs)) return {} output = salt.utils.json.loads(brew_result['stdout']) return dict(zip(pkgs, output)) def install(name=None, pkgs=None, taps=None, options=None, **kwargs): ''' Install the passed package(s) with ``brew install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> taps Unofficial GitHub repos to use when updating and installing formulas. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install zlib taps='homebrew/dupes' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options Options to pass to brew. Only applies to initial install. Due to how brew works, modifying chosen options requires a full uninstall followed by a fresh install. Note that if "pkgs" is used, all options will be passed to all packages. Unrecognized options for a package will be silently ignored by brew. CLI Example: .. 
code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options='["--with-fpm"]' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install 'package package package' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, kwargs.get('sources', {}) ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} formulas = ' '.join(pkg_params) old = list_pkgs() # Ensure we've tapped the repo if necessary if taps: if not isinstance(taps, list): # Feels like there is a better way to allow for tap being # specified as both a string and a list taps = [taps] for tap in taps: _tap(tap) if options: cmd = 'install {0} {1}'.format(formulas, ' '.join(options)) else: cmd = 'install {0}'.format(formulas) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret def upgrade_available(pkg, **kwargs): ''' Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return pkg in list_upgrades() def upgrade(refresh=True, **kwargs): ''' Upgrade outdated, unpinned brews. refresh Fetch the newest version of Homebrew and all formulae from GitHub before installing. Returns a dictionary containing the changes: .. 
code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': '', } old = list_pkgs() if salt.utils.data.is_true(refresh): refresh_db() result = _call_brew('upgrade', failhard=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret def info_installed(*names, **kwargs): ''' Return the information of the named package(s) installed on the system. .. versionadded:: 2016.3.1 names The names of the packages for which to return information. CLI example: .. code-block:: bash salt '*' pkg.info_installed <package1> salt '*' pkg.info_installed <package1> <package2> <package3> ... ''' return _info(*names)
saltstack/salt
salt/modules/mac_brew_pkg.py
upgrade
python
def upgrade(refresh=True, **kwargs): ''' Upgrade outdated, unpinned brews. refresh Fetch the newest version of Homebrew and all formulae from GitHub before installing. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': '', } old = list_pkgs() if salt.utils.data.is_true(refresh): refresh_db() result = _call_brew('upgrade', failhard=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret
Upgrade outdated, unpinned brews. refresh Fetch the newest version of Homebrew and all formulae from GitHub before installing. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_brew_pkg.py#L475-L517
[ "def is_true(value=None):\n '''\n Returns a boolean value representing the \"truth\" of the value passed. The\n rules for what is a \"True\" value are:\n\n 1. Integer/float values greater than 0\n 2. The string values \"True\" and \"true\"\n 3. Any object for which bool(obj) returns True\n '''\n # First, try int/float conversion\n try:\n value = int(value)\n except (ValueError, TypeError):\n pass\n try:\n value = float(value)\n except (ValueError, TypeError):\n pass\n\n # Now check for truthiness\n if isinstance(value, (six.integer_types, float)):\n return value > 0\n elif isinstance(value, six.string_types):\n return six.text_type(value).lower() == 'true'\n else:\n return bool(value)\n", "def list_pkgs(versions_as_list=False, **kwargs):\n '''\n List the packages currently installed in a dict::\n\n {'<package_name>': '<version>'}\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.list_pkgs\n '''\n versions_as_list = salt.utils.data.is_true(versions_as_list)\n # not yet implemented or not applicable\n if any([salt.utils.data.is_true(kwargs.get(x))\n for x in ('removed', 'purge_desired')]):\n return {}\n\n if 'pkg.list_pkgs' in __context__:\n if versions_as_list:\n return __context__['pkg.list_pkgs']\n else:\n ret = copy.deepcopy(__context__['pkg.list_pkgs'])\n __salt__['pkg_resource.stringify'](ret)\n return ret\n\n ret = {}\n cmd = 'info --json=v1 --installed'\n package_info = salt.utils.json.loads(_call_brew(cmd)['stdout'])\n\n for package in package_info:\n # Brew allows multiple versions of the same package to be installed.\n # Salt allows for this, so it must be accounted for.\n versions = [v['version'] for v in package['installed']]\n # Brew allows for aliasing of packages, all of which will be\n # installable from a Salt call, so all names must be accounted for.\n names = package['aliases'] + [package['name'], package['full_name']]\n # Create a list of tuples containing all possible combinations of\n # names and versions, because all are valid.\n 
combinations = [(n, v) for n in names for v in versions]\n\n for name, version in combinations:\n __salt__['pkg_resource.add_pkg'](ret, name, version)\n\n # Grab packages from brew cask, if available.\n # Brew Cask doesn't provide a JSON interface, must be parsed the old way.\n try:\n cask_cmd = 'cask list --versions'\n out = _call_brew(cask_cmd)['stdout']\n\n for line in out.splitlines():\n try:\n name_and_versions = line.split(' ')\n name = '/'.join(('caskroom/cask', name_and_versions[0]))\n installed_versions = name_and_versions[1:]\n key_func = functools.cmp_to_key(salt.utils.versions.version_cmp)\n newest_version = sorted(installed_versions, key=key_func).pop()\n except ValueError:\n continue\n __salt__['pkg_resource.add_pkg'](ret, name, newest_version)\n except CommandExecutionError:\n pass\n\n __salt__['pkg_resource.sort_pkglist'](ret)\n __context__['pkg.list_pkgs'] = copy.deepcopy(ret)\n if not versions_as_list:\n __salt__['pkg_resource.stringify'](ret)\n return ret\n", "def refresh_db(**kwargs):\n '''\n Update the homebrew package repository.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.refresh_db\n '''\n # Remove rtag file to keep multiple refreshes from happening in pkg states\n salt.utils.pkg.clear_rtag(__opts__)\n cmd = 'update'\n if _call_brew(cmd)['retcode']:\n log.error('Failed to update')\n return False\n\n return True\n", "def _call_brew(cmd, failhard=True):\n '''\n Calls the brew command with the user account of brew\n '''\n user = __salt__['file.get_user'](_homebrew_bin())\n runas = user if user != __opts__['user'] else None\n cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)\n result = __salt__['cmd.run_all'](cmd,\n runas=runas,\n output_loglevel='trace',\n python_shell=False)\n if failhard and result['retcode'] != 0:\n raise CommandExecutionError('Brew command failed',\n info={'result': result})\n return result\n" ]
# -*- coding: utf-8 -*- ''' Homebrew for macOS .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import functools import logging # Import salt libs import salt.utils.data import salt.utils.functools import salt.utils.json import salt.utils.path import salt.utils.pkg import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError # Import third party libs from salt.ext import six from salt.ext.six.moves import zip log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Confine this module to Mac OS with Homebrew. ''' if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return (False, 'The brew module could not be loaded: brew not found or grain os != MacOS') def _list_taps(): ''' List currently installed brew taps ''' cmd = 'tap' return _call_brew(cmd)['stdout'].splitlines() def _tap(tap, runas=None): ''' Add unofficial GitHub repos to the list of formulas that brew tracks, updates, and installs from. 
''' if tap in _list_taps(): return True cmd = 'tap {0}'.format(tap) try: _call_brew(cmd) except CommandExecutionError: log.error('Failed to tap "%s"', tap) return False return True def _homebrew_bin(): ''' Returns the full path to the homebrew binary in the PATH ''' ret = __salt__['cmd.run']('brew --prefix', output_loglevel='trace') ret += '/bin/brew' return ret def _call_brew(cmd, failhard=True): ''' Calls the brew command with the user account of brew ''' user = __salt__['file.get_user'](_homebrew_bin()) runas = user if user != __opts__['user'] else None cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd) result = __salt__['cmd.run_all'](cmd, runas=runas, output_loglevel='trace', python_shell=False) if failhard and result['retcode'] != 0: raise CommandExecutionError('Brew command failed', info={'result': result}) return result def list_pkgs(versions_as_list=False, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' versions_as_list = salt.utils.data.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} cmd = 'info --json=v1 --installed' package_info = salt.utils.json.loads(_call_brew(cmd)['stdout']) for package in package_info: # Brew allows multiple versions of the same package to be installed. # Salt allows for this, so it must be accounted for. versions = [v['version'] for v in package['installed']] # Brew allows for aliasing of packages, all of which will be # installable from a Salt call, so all names must be accounted for. 
names = package['aliases'] + [package['name'], package['full_name']] # Create a list of tuples containing all possible combinations of # names and versions, because all are valid. combinations = [(n, v) for n in names for v in versions] for name, version in combinations: __salt__['pkg_resource.add_pkg'](ret, name, version) # Grab packages from brew cask, if available. # Brew Cask doesn't provide a JSON interface, must be parsed the old way. try: cask_cmd = 'cask list --versions' out = _call_brew(cask_cmd)['stdout'] for line in out.splitlines(): try: name_and_versions = line.split(' ') name = '/'.join(('caskroom/cask', name_and_versions[0])) installed_versions = name_and_versions[1:] key_func = functools.cmp_to_key(salt.utils.versions.version_cmp) newest_version = sorted(installed_versions, key=key_func).pop() except ValueError: continue __salt__['pkg_resource.add_pkg'](ret, name, newest_version) except CommandExecutionError: pass __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ''' return __salt__['pkg_resource.version'](*names, **kwargs) def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation Currently chooses stable versions, falling back to devel if that does not exist. CLI Example: .. 
code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ''' refresh = salt.utils.data.is_true(kwargs.pop('refresh', True)) if refresh: refresh_db() def get_version(pkg_info): # Perhaps this will need an option to pick devel by default return pkg_info['versions']['stable'] or pkg_info['versions']['devel'] versions_dict = dict((key, get_version(val)) for key, val in six.iteritems(_info(*names))) if len(names) == 1: return next(six.itervalues(versions_dict)) else: return versions_dict # available_version is being deprecated available_version = salt.utils.functools.alias_function(latest_version, 'available_version') def remove(name=None, pkgs=None, **kwargs): ''' Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, **kwargs )[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = 'uninstall {0}'.format(' '.join(targets)) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret def refresh_db(**kwargs): ''' Update the homebrew package repository. CLI Example: .. 
code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) cmd = 'update' if _call_brew(cmd)['retcode']: log.error('Failed to update') return False return True def _info(*pkgs): ''' Get all info brew can provide about a list of packages. Does not do any kind of processing, so the format depends entirely on the output brew gives. This may change if a new version of the format is requested. On failure, returns an empty dict and logs failure. On success, returns a dict mapping each item in pkgs to its corresponding object in the output of 'brew info'. Caveat: If one of the packages does not exist, no packages will be included in the output. ''' cmd = 'info --json=v1 {0}'.format(' '.join(pkgs)) brew_result = _call_brew(cmd) if brew_result['retcode']: log.error('Failed to get info about packages: %s', ' '.join(pkgs)) return {} output = salt.utils.json.loads(brew_result['stdout']) return dict(zip(pkgs, output)) def install(name=None, pkgs=None, taps=None, options=None, **kwargs): ''' Install the passed package(s) with ``brew install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> taps Unofficial GitHub repos to use when updating and installing formulas. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install zlib taps='homebrew/dupes' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options Options to pass to brew. Only applies to initial install. Due to how brew works, modifying chosen options requires a full uninstall followed by a fresh install. Note that if "pkgs" is used, all options will be passed to all packages. Unrecognized options for a package will be silently ignored by brew. CLI Example: .. 
code-block:: bash salt '*' pkg.install <package name> tap='<tap>' salt '*' pkg.install php54 taps='["josegonzalez/php", "homebrew/dupes"]' options='["--with-fpm"]' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install 'package package package' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, kwargs.get('sources', {}) ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} formulas = ' '.join(pkg_params) old = list_pkgs() # Ensure we've tapped the repo if necessary if taps: if not isinstance(taps, list): # Feels like there is a better way to allow for tap being # specified as both a string and a list taps = [taps] for tap in taps: _tap(tap) if options: cmd = 'install {0} {1}'.format(formulas, ' '.join(options)) else: cmd = 'install {0}'.format(formulas) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' Check whether or not an upgrade is available for all packages CLI Example: .. 
code-block:: bash salt '*' pkg.list_upgrades ''' if refresh: refresh_db() res = _call_brew('outdated --json=v1') ret = {} try: data = salt.utils.json.loads(res['stdout']) except ValueError as err: msg = 'unable to interpret output from "brew outdated": {0}'.format(err) log.error(msg) raise CommandExecutionError(msg) for pkg in data: # current means latest available to brew ret[pkg['name']] = pkg['current_version'] return ret def upgrade_available(pkg, **kwargs): ''' Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return pkg in list_upgrades() def info_installed(*names, **kwargs): ''' Return the information of the named package(s) installed on the system. .. versionadded:: 2016.3.1 names The names of the packages for which to return information. CLI example: .. code-block:: bash salt '*' pkg.info_installed <package1> salt '*' pkg.info_installed <package1> <package2> <package3> ... ''' return _info(*names)
saltstack/salt
salt/states/chronos_job.py
config
python
def config(name, config): ''' Ensure that the chronos job with the given name is present and is configured to match the given config values. :param name: The job name :param config: The configuration to apply (dict) :return: A standard Salt changes dictionary ''' # setup return structure ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '', } # get existing config if job is present existing_config = None if __salt__['chronos.has_job'](name): existing_config = __salt__['chronos.job'](name)['job'] # compare existing config with defined config if existing_config: update_config = copy.deepcopy(existing_config) salt.utils.configcomparer.compare_and_update_config( config, update_config, ret['changes'], ) else: # the job is not configured--we need to create it from scratch ret['changes']['job'] = { 'new': config, 'old': None, } update_config = config if ret['changes']: # if the only change is in schedule, check to see if patterns are equivalent if 'schedule' in ret['changes'] and len(ret['changes']) == 1: if 'new' in ret['changes']['schedule'] and 'old' in ret['changes']['schedule']: new = ret['changes']['schedule']['new'] log.debug('new schedule: %s', new) old = ret['changes']['schedule']['old'] log.debug('old schedule: %s', old) if new and old: _new = new.split('/') log.debug('_new schedule: %s', _new) _old = old.split('/') log.debug('_old schedule: %s', _old) if len(_new) == 3 and len(_old) == 3: log.debug('_new[0] == _old[0]: %s', six.text_type(_new[0]) == six.text_type(_old[0])) log.debug('_new[2] == _old[2]: %s', six.text_type(_new[2]) == six.text_type(_old[2])) if six.text_type(_new[0]) == six.text_type(_old[0]) and \ six.text_type(_new[2]) == six.text_type(_old[2]): log.debug('schedules match--no need for changes') ret['changes'] = {} # update the config if we registered any changes log.debug('schedules match--no need for changes') if ret['changes']: # if test report there will be an update if __opts__['test']: ret['result'] = None ret['comment'] = 
'Chronos job {0} is set to be updated'.format( name ) return ret update_result = __salt__['chronos.update_job'](name, update_config) if 'exception' in update_result: ret['result'] = False ret['comment'] = 'Failed to update job config for {0}: {1}'.format( name, update_result['exception'], ) return ret else: ret['result'] = True ret['comment'] = 'Updated job config for {0}'.format(name) return ret ret['result'] = True ret['comment'] = 'Chronos job {0} configured correctly'.format(name) return ret
Ensure that the chronos job with the given name is present and is configured to match the given config values. :param name: The job name :param config: The configuration to apply (dict) :return: A standard Salt changes dictionary
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/chronos_job.py#L27-L113
[ "def compare_and_update_config(config, update_config, changes, namespace=''):\n '''\n Recursively compare two configs, writing any needed changes to the\n update_config and capturing changes in the changes dict.\n '''\n if isinstance(config, dict):\n if not update_config:\n if config:\n # the updated config is more valid--report that we are using it\n changes[namespace] = {\n 'new': config,\n 'old': update_config,\n }\n return config\n elif not isinstance(update_config, dict):\n # new config is a dict, other isn't--new one wins\n changes[namespace] = {\n 'new': config,\n 'old': update_config,\n }\n return config\n else:\n # compare each key in the base config with the values in the\n # update_config, overwriting the values that are different but\n # keeping any that are not defined in config\n for key, value in six.iteritems(config):\n _namespace = key\n if namespace:\n _namespace = '{0}.{1}'.format(namespace, _namespace)\n update_config[key] = compare_and_update_config(\n value,\n update_config.get(key, None),\n changes,\n namespace=_namespace,\n )\n return update_config\n\n elif isinstance(config, list):\n if not update_config:\n if config:\n # the updated config is more valid--report that we are using it\n changes[namespace] = {\n 'new': config,\n 'old': update_config,\n }\n return config\n elif not isinstance(update_config, list):\n # new config is a list, other isn't--new one wins\n changes[namespace] = {\n 'new': config,\n 'old': update_config,\n }\n return config\n else:\n # iterate through config list, ensuring that each index in the\n # update_config list is the same\n for idx, item in enumerate(config):\n _namespace = '[{0}]'.format(idx)\n if namespace:\n _namespace = '{0}{1}'.format(namespace, _namespace)\n _update = None\n if len(update_config) > idx:\n _update = update_config[idx]\n if _update:\n update_config[idx] = compare_and_update_config(\n config[idx],\n _update,\n changes,\n namespace=_namespace,\n )\n else:\n changes[_namespace] = {\n 'new': 
config[idx],\n 'old': _update,\n }\n update_config.append(config[idx])\n\n if len(update_config) > len(config):\n # trim any items in update_config that are not in config\n for idx, old_item in enumerate(update_config):\n if idx < len(config):\n continue\n _namespace = '[{0}]'.format(idx)\n if namespace:\n _namespace = '{0}{1}'.format(namespace, _namespace)\n changes[_namespace] = {\n 'new': None,\n 'old': old_item,\n }\n del update_config[len(config):]\n return update_config\n\n else:\n if config != update_config:\n changes[namespace] = {\n 'new': config,\n 'old': update_config,\n }\n return config\n" ]
# -*- coding: utf-8 -*- ''' Configure Chronos jobs via a salt proxy. .. code-block:: yaml my_job: chronos_job.config: - config: schedule: "R//PT2S" command: "echo 'hi'" owner: "me@example.com" .. versionadded:: 2015.8.2 ''' from __future__ import absolute_import, print_function, unicode_literals import copy import logging from salt.ext import six import salt.utils.configcomparer __proxyenabled__ = ['chronos'] log = logging.getLogger(__file__) def absent(name): ''' Ensure that the chronos job with the given name is not present. :param name: The app name :return: A standard Salt changes dictionary ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if not __salt__['chronos.has_job'](name): ret['result'] = True ret['comment'] = 'Job {0} already absent'.format(name) return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Job {0} is set to be removed'.format(name) return ret if __salt__['chronos.rm_job'](name): ret['changes'] = {'job': name} ret['result'] = True ret['comment'] = 'Removed job {0}'.format(name) return ret else: ret['result'] = False ret['comment'] = 'Failed to remove job {0}'.format(name) return ret
saltstack/salt
salt/states/infoblox_range.py
present
python
def present(name=None, start_addr=None, end_addr=None, data=None, **api_opts): ''' Ensure range record is present. infoblox_range.present: start_addr: '129.97.150.160', end_addr: '129.97.150.170', Verbose state example: .. code-block:: yaml infoblox_range.present: data: { 'always_update_dns': False, 'authority': False, 'comment': 'range of IP addresses used for salt.. was used for ghost images deployment', 'ddns_generate_hostname': True, 'deny_all_clients': False, 'deny_bootp': False, 'disable': False, 'email_list': [], 'enable_ddns': False, 'enable_dhcp_thresholds': False, 'enable_email_warnings': False, 'enable_ifmap_publishing': False, 'enable_snmp_warnings': False, 'end_addr': '129.97.150.169', 'exclude': [], 'extattrs': {}, 'fingerprint_filter_rules': [], 'high_water_mark': 95, 'high_water_mark_reset': 85, 'ignore_dhcp_option_list_request': False, 'lease_scavenge_time': -1, 'logic_filter_rules': [], 'low_water_mark': 0, 'low_water_mark_reset': 10, 'mac_filter_rules': [], 'member': {'_struct': 'dhcpmember', 'ipv4addr': '129.97.128.9', 'name': 'cn-dhcp-mc.example.ca'}, 'ms_options': [], 'nac_filter_rules': [], 'name': 'ghost-range', 'network': '129.97.150.0/24', 'network_view': 'default', 'option_filter_rules': [], 'options': [{'name': 'dhcp-lease-time', 'num': 51, 'use_option': False, 'value': '43200', 'vendor_class': 'DHCP'}], 'recycle_leases': True, 'relay_agent_filter_rules': [], 'server_association_type': 'MEMBER', 'start_addr': '129.97.150.160', 'update_dns_on_lease_renewal': False, 'use_authority': False, 'use_bootfile': False, 'use_bootserver': False, 'use_ddns_domainname': False, 'use_ddns_generate_hostname': True, 'use_deny_bootp': False, 'use_email_list': False, 'use_enable_ddns': False, 'use_enable_dhcp_thresholds': False, 'use_enable_ifmap_publishing': False, 'use_ignore_dhcp_option_list_request': False, 'use_known_clients': False, 'use_lease_scavenge_time': False, 'use_nextserver': False, 'use_options': False, 'use_recycle_leases': False, 
'use_unknown_clients': False, 'use_update_dns_on_lease_renewal': False } ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if not data: data = {} if 'name' not in data: data.update({'name': name}) if 'start_addr' not in data: data.update({'start_addr': start_addr}) if 'end_addr' not in data: data.update({'end_addr': end_addr}) obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts) if obj is None: obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts) if obj is None: obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts) if obj: diff = __salt__['infoblox.diff_objects'](data, obj) if not diff: ret['result'] = True ret['comment'] = 'supplied fields in correct state' return ret if diff: if __opts__['test']: ret['result'] = None ret['comment'] = 'would attempt to update record' return ret new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts) ret['result'] = True ret['comment'] = 'record fields updated' ret['changes'] = {'diff': diff} return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'would attempt to create record {0}'.format(name) return ret new_obj_ref = __salt__['infoblox.create_ipv4_range'](data, **api_opts) new_obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts) ret['result'] = True ret['comment'] = 'record created' ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}} return ret
Ensure range record is present. infoblox_range.present: start_addr: '129.97.150.160', end_addr: '129.97.150.170', Verbose state example: .. code-block:: yaml infoblox_range.present: data: { 'always_update_dns': False, 'authority': False, 'comment': 'range of IP addresses used for salt.. was used for ghost images deployment', 'ddns_generate_hostname': True, 'deny_all_clients': False, 'deny_bootp': False, 'disable': False, 'email_list': [], 'enable_ddns': False, 'enable_dhcp_thresholds': False, 'enable_email_warnings': False, 'enable_ifmap_publishing': False, 'enable_snmp_warnings': False, 'end_addr': '129.97.150.169', 'exclude': [], 'extattrs': {}, 'fingerprint_filter_rules': [], 'high_water_mark': 95, 'high_water_mark_reset': 85, 'ignore_dhcp_option_list_request': False, 'lease_scavenge_time': -1, 'logic_filter_rules': [], 'low_water_mark': 0, 'low_water_mark_reset': 10, 'mac_filter_rules': [], 'member': {'_struct': 'dhcpmember', 'ipv4addr': '129.97.128.9', 'name': 'cn-dhcp-mc.example.ca'}, 'ms_options': [], 'nac_filter_rules': [], 'name': 'ghost-range', 'network': '129.97.150.0/24', 'network_view': 'default', 'option_filter_rules': [], 'options': [{'name': 'dhcp-lease-time', 'num': 51, 'use_option': False, 'value': '43200', 'vendor_class': 'DHCP'}], 'recycle_leases': True, 'relay_agent_filter_rules': [], 'server_association_type': 'MEMBER', 'start_addr': '129.97.150.160', 'update_dns_on_lease_renewal': False, 'use_authority': False, 'use_bootfile': False, 'use_bootserver': False, 'use_ddns_domainname': False, 'use_ddns_generate_hostname': True, 'use_deny_bootp': False, 'use_email_list': False, 'use_enable_ddns': False, 'use_enable_dhcp_thresholds': False, 'use_enable_ifmap_publishing': False, 'use_ignore_dhcp_option_list_request': False, 'use_known_clients': False, 'use_lease_scavenge_time': False, 'use_nextserver': False, 'use_options': False, 'use_recycle_leases': False, 'use_unknown_clients': False, 'use_update_dns_on_lease_renewal': False }
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/infoblox_range.py#L17-L140
null
# -*- coding: utf-8 -*- ''' Infoblox host record managment. functions accept api_opts: api_verifyssl: verify SSL [default to True or pillar value] api_url: server to connect to [default to pillar value] api_username: [default to pillar value] api_password: [default to pillar value] ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals def absent(name=None, start_addr=None, end_addr=None, data=None, **api_opts): ''' Ensure the range is removed Supplying the end of the range is optional. State example: .. code-block:: yaml infoblox_range.absent: - name: 'vlan10' infoblox_range.absent: - name: - start_addr: 127.0.1.20 ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if not data: data = {} if 'name' not in data: data.update({'name': name}) if 'start_addr' not in data: data.update({'start_addr': start_addr}) if 'end_addr' not in data: data.update({'end_addr': end_addr}) obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts) if obj is None: obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts) if obj is None: obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts) if not obj: ret['result'] = True ret['comment'] = 'already deleted' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'would attempt to delete range' return ret if __salt__['infoblox.delete_object'](objref=obj['_ref']): ret['result'] = True ret['changes'] = {'old': 'Found {0} - {1}'.format(start_addr, end_addr), 'new': 'Removed'} return ret
saltstack/salt
salt/states/infoblox_range.py
absent
python
def absent(name=None, start_addr=None, end_addr=None, data=None, **api_opts): ''' Ensure the range is removed Supplying the end of the range is optional. State example: .. code-block:: yaml infoblox_range.absent: - name: 'vlan10' infoblox_range.absent: - name: - start_addr: 127.0.1.20 ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if not data: data = {} if 'name' not in data: data.update({'name': name}) if 'start_addr' not in data: data.update({'start_addr': start_addr}) if 'end_addr' not in data: data.update({'end_addr': end_addr}) obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts) if obj is None: obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts) if obj is None: obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts) if not obj: ret['result'] = True ret['comment'] = 'already deleted' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'would attempt to delete range' return ret if __salt__['infoblox.delete_object'](objref=obj['_ref']): ret['result'] = True ret['changes'] = {'old': 'Found {0} - {1}'.format(start_addr, end_addr), 'new': 'Removed'} return ret
Ensure the range is removed Supplying the end of the range is optional. State example: .. code-block:: yaml infoblox_range.absent: - name: 'vlan10' infoblox_range.absent: - name: - start_addr: 127.0.1.20
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/infoblox_range.py#L143-L191
null
# -*- coding: utf-8 -*- ''' Infoblox host record managment. functions accept api_opts: api_verifyssl: verify SSL [default to True or pillar value] api_url: server to connect to [default to pillar value] api_username: [default to pillar value] api_password: [default to pillar value] ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals def present(name=None, start_addr=None, end_addr=None, data=None, **api_opts): ''' Ensure range record is present. infoblox_range.present: start_addr: '129.97.150.160', end_addr: '129.97.150.170', Verbose state example: .. code-block:: yaml infoblox_range.present: data: { 'always_update_dns': False, 'authority': False, 'comment': 'range of IP addresses used for salt.. was used for ghost images deployment', 'ddns_generate_hostname': True, 'deny_all_clients': False, 'deny_bootp': False, 'disable': False, 'email_list': [], 'enable_ddns': False, 'enable_dhcp_thresholds': False, 'enable_email_warnings': False, 'enable_ifmap_publishing': False, 'enable_snmp_warnings': False, 'end_addr': '129.97.150.169', 'exclude': [], 'extattrs': {}, 'fingerprint_filter_rules': [], 'high_water_mark': 95, 'high_water_mark_reset': 85, 'ignore_dhcp_option_list_request': False, 'lease_scavenge_time': -1, 'logic_filter_rules': [], 'low_water_mark': 0, 'low_water_mark_reset': 10, 'mac_filter_rules': [], 'member': {'_struct': 'dhcpmember', 'ipv4addr': '129.97.128.9', 'name': 'cn-dhcp-mc.example.ca'}, 'ms_options': [], 'nac_filter_rules': [], 'name': 'ghost-range', 'network': '129.97.150.0/24', 'network_view': 'default', 'option_filter_rules': [], 'options': [{'name': 'dhcp-lease-time', 'num': 51, 'use_option': False, 'value': '43200', 'vendor_class': 'DHCP'}], 'recycle_leases': True, 'relay_agent_filter_rules': [], 'server_association_type': 'MEMBER', 'start_addr': '129.97.150.160', 'update_dns_on_lease_renewal': False, 'use_authority': False, 'use_bootfile': False, 'use_bootserver': False, 'use_ddns_domainname': False, 
'use_ddns_generate_hostname': True, 'use_deny_bootp': False, 'use_email_list': False, 'use_enable_ddns': False, 'use_enable_dhcp_thresholds': False, 'use_enable_ifmap_publishing': False, 'use_ignore_dhcp_option_list_request': False, 'use_known_clients': False, 'use_lease_scavenge_time': False, 'use_nextserver': False, 'use_options': False, 'use_recycle_leases': False, 'use_unknown_clients': False, 'use_update_dns_on_lease_renewal': False } ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if not data: data = {} if 'name' not in data: data.update({'name': name}) if 'start_addr' not in data: data.update({'start_addr': start_addr}) if 'end_addr' not in data: data.update({'end_addr': end_addr}) obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts) if obj is None: obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts) if obj is None: obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts) if obj: diff = __salt__['infoblox.diff_objects'](data, obj) if not diff: ret['result'] = True ret['comment'] = 'supplied fields in correct state' return ret if diff: if __opts__['test']: ret['result'] = None ret['comment'] = 'would attempt to update record' return ret new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts) ret['result'] = True ret['comment'] = 'record fields updated' ret['changes'] = {'diff': diff} return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'would attempt to create record {0}'.format(name) return ret new_obj_ref = __salt__['infoblox.create_ipv4_range'](data, **api_opts) new_obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts) ret['result'] = True ret['comment'] = 'record created' ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}} return ret
saltstack/salt
salt/utils/vt_helper.py
SSHConnection.sendline
python
def sendline(self, cmd): ''' Send this command to the server and return a tuple of the output and the stderr. The format for parameters is: cmd (string): The command to send to the sever. ''' self.conn.sendline(cmd, self.linesep) # saw_prompt = False ret_stdout = [] ret_stderr = [] while self.conn.has_unread_data: stdout, stderr = self.conn.recv() if stdout: ret_stdout.append(stdout) if stderr: log.debug('Error while executing command.') ret_stderr.append(stderr) if stdout and self.prompt_re.search(stdout): break return ''.join(ret_stdout), ''.join(ret_stderr)
Send this command to the server and return a tuple of the output and the stderr. The format for parameters is: cmd (string): The command to send to the sever.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vt_helper.py#L107-L133
null
class SSHConnection(object): ''' SSH Connection to a remote server. ''' def __init__(self, username='salt', password='password', host='localhost', key_accept=False, prompt=r'(Cmd)', passwd_retries=3, linesep=os.linesep, ssh_args=''): ''' Establishes a connection to the remote server. The format for parameters is: username (string): The username to use for this ssh connection. Defaults to root. password (string): The password to use for this ssh connection. Defaults to password. host (string): The host to connect to. Defaults to localhost. key_accept (boolean): Should we accept this host's key and add it to the known_hosts file? Defaults to False. prompt (string): The shell prompt (regex) on the server. Prompt is compiled into a regular expression. Defaults to (Cmd) passwd_retries (int): How many times should I try to send the password? Defaults to 3. linesep (string): The line separator to use when sending commands to the server. Defaults to os.linesep. ssh_args (string): Extra ssh args to use with ssh. 
Example: '-o PubkeyAuthentication=no' ''' self.conn = Terminal( 'ssh {0} -l {1} {2}'.format(ssh_args, username, host), shell=True, log_stdout=True, log_stdout_level='trace', log_stderr=True, log_stderr_level='trace', stream_stdout=False, stream_stderr=False) sent_passwd = 0 self.prompt_re = re.compile(prompt) self.linesep = linesep while self.conn.has_unread_data: stdout, stderr = self.conn.recv() if stdout and SSH_PASSWORD_PROMPT_RE.search(stdout): if not password: log.error('Failure while authentication.') raise TerminalException( 'Permission denied, no authentication information') if sent_passwd < passwd_retries: self.conn.sendline(password, self.linesep) sent_passwd += 1 continue else: # asking for a password, and we can't seem to send it raise TerminalException('Password authentication failed') elif stdout and KEY_VALID_RE.search(stdout): # Connecting to this server for the first time # and need to accept key if key_accept: log.info('Adding %s to known_hosts', host) self.conn.sendline('yes') continue else: self.conn.sendline('no') elif stdout and self.prompt_re.search(stdout): # Auth success! # We now have a prompt break def close_connection(self): ''' Close the server connection ''' self.conn.close(terminate=True, kill=True)
saltstack/salt
salt/utils/vmware.py
esxcli
python
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. :param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret
Shell out and call the specified esxcli commmand, parse the result and return something sane. :param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L125-L179
[ "def run_all(cmd,\n cwd=None,\n stdin=None,\n runas=None,\n group=None,\n shell=DEFAULT_SHELL,\n python_shell=None,\n env=None,\n clean_env=False,\n template=None,\n rstrip=True,\n umask=None,\n output_encoding=None,\n output_loglevel='debug',\n log_callback=None,\n hide_output=False,\n timeout=None,\n reset_system_locale=True,\n ignore_retcode=False,\n saltenv='base',\n use_vt=False,\n redirect_stderr=False,\n password=None,\n encoded_cmd=False,\n prepend_path=None,\n success_retcodes=None,\n success_stdout=None,\n success_stderr=None,\n **kwargs):\n '''\n Execute the passed command and return a dict of return data\n\n :param str cmd: The command to run. ex: ``ls -lart /home``\n\n :param str cwd: The directory from which to execute the command. Defaults\n to the home directory of the user specified by ``runas`` (or the user\n under which Salt is running if ``runas`` is not specified).\n\n :param str stdin: A string of standard input can be specified for the\n command to be run using the ``stdin`` parameter. This can be useful in\n cases where sensitive information must be read from standard input.\n\n :param str runas: Specify an alternate user to run the command. The default\n behavior is to run as the user under which Salt is running. If running\n on a Windows minion you must also use the ``password`` argument, and\n the target user account must be in the Administrators group.\n\n .. warning::\n\n For versions 2018.3.3 and above on macosx while using runas,\n to pass special characters to the command you need to escape\n the characters on the shell.\n\n Example:\n\n .. code-block:: bash\n\n cmd.run_all 'echo '\\\\''h=\\\\\"baz\\\\\"'\\\\\\''' runas=macuser\n\n :param str password: Windows only. Required when specifying ``runas``. This\n parameter will be ignored on non-Windows platforms.\n\n .. versionadded:: 2016.3.0\n\n :param str group: Group to run command as. Not currently supported\n on Windows.\n\n :param str shell: Specify an alternate shell. 
Defaults to the system's\n default shell.\n\n :param bool python_shell: If False, let python handle the positional\n arguments. Set to True to use shell features, such as pipes or\n redirection.\n\n :param dict env: Environment variables to be set prior to execution.\n\n .. note::\n When passing environment variables on the CLI, they should be\n passed as the string representation of a dictionary.\n\n .. code-block:: bash\n\n salt myminion cmd.run_all 'some command' env='{\"FOO\": \"bar\"}'\n\n :param bool clean_env: Attempt to clean out all other shell environment\n variables and set only those provided in the 'env' argument to this\n function.\n\n :param str prepend_path: $PATH segment to prepend (trailing ':' not\n necessary) to $PATH\n\n .. versionadded:: 2018.3.0\n\n :param str template: If this setting is applied then the named templating\n engine will be used to render the downloaded file. Currently jinja,\n mako, and wempy are supported.\n\n :param bool rstrip: Strip all whitespace off the end of output before it is\n returned.\n\n :param str umask: The umask (in octal) to use when running the command.\n\n :param str output_encoding: Control the encoding used to decode the\n command's output.\n\n .. note::\n This should not need to be used in most cases. By default, Salt\n will try to use the encoding detected from the system locale, and\n will fall back to UTF-8 if this fails. This should only need to be\n used in cases where the output of the command is encoded in\n something other than the system locale or UTF-8.\n\n To see the encoding Salt has detected from the system locale, check\n the `locale` line in the output of :py:func:`test.versions_report\n <salt.modules.test.versions_report>`.\n\n .. versionadded:: 2018.3.0\n\n :param str output_loglevel: Control the loglevel at which the output from\n the command is logged to the minion log.\n\n .. 
note::\n The command being run will still be logged at the ``debug``\n loglevel regardless, unless ``quiet`` is used for this value.\n\n :param bool ignore_retcode: If the exit code of the command is nonzero,\n this is treated as an error condition, and the output from the command\n will be logged to the minion log. However, there are some cases where\n programs use the return code for signaling and a nonzero exit code\n doesn't necessarily mean failure. Pass this argument as ``True`` to\n skip logging the output if the command has a nonzero exit code.\n\n :param bool hide_output: If ``True``, suppress stdout and stderr in the\n return data.\n\n .. note::\n This is separate from ``output_loglevel``, which only handles how\n Salt logs to the minion log.\n\n .. versionadded:: 2018.3.0\n\n :param int timeout: A timeout in seconds for the executed process to\n return.\n\n :param bool use_vt: Use VT utils (saltstack) to stream the command output\n more interactively to the console and the logs. This is experimental.\n\n :param bool encoded_cmd: Specify if the supplied command is encoded.\n Only applies to shell 'powershell'.\n\n .. versionadded:: 2018.3.0\n\n :param bool redirect_stderr: If set to ``True``, then stderr will be\n redirected to stdout. This is helpful for cases where obtaining both\n the retcode and output is desired, but it is not desired to have the\n output separated into both stdout and stderr.\n\n .. versionadded:: 2015.8.2\n\n :param str password: Windows only. Required when specifying ``runas``. This\n parameter will be ignored on non-Windows platforms.\n\n .. versionadded:: 2016.3.0\n\n :param bool bg: If ``True``, run command in background and do not await or\n deliver its results\n\n .. versionadded:: 2016.3.6\n\n :param list success_retcodes: This parameter will be allow a list of\n non-zero return codes that should be considered a success. 
If the\n return code returned from the run matches any in the provided list,\n the return code will be overridden with zero.\n\n .. versionadded:: 2019.2.0\n\n :param list success_stdout: This parameter will be allow a list of\n strings that when found in standard out should be considered a success.\n If stdout returned from the run matches any in the provided list,\n the return code will be overridden with zero.\n\n .. versionadded:: Neon\n\n :param list success_stderr: This parameter will be allow a list of\n strings that when found in standard error should be considered a success.\n If stderr returned from the run matches any in the provided list,\n the return code will be overridden with zero.\n\n .. versionadded:: Neon\n\n :param bool stdin_raw_newlines: False\n If ``True``, Salt will not automatically convert the characters ``\\\\n``\n present in the ``stdin`` value to newlines.\n\n .. versionadded:: 2019.2.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cmd.run_all \"ls -l | awk '/foo/{print \\\\$2}'\"\n\n The template arg can be set to 'jinja' or another supported template\n engine to render the command arguments before execution.\n For example:\n\n .. code-block:: bash\n\n salt '*' cmd.run_all template=jinja \"ls -l /tmp/{{grains.id}} | awk '/foo/{print \\\\$2}'\"\n\n A string of standard input can be specified for the command to be run using\n the ``stdin`` parameter. This can be useful in cases where sensitive\n information must be read from standard input.\n\n .. 
code-block:: bash\n\n salt '*' cmd.run_all \"grep f\" stdin='one\\\\ntwo\\\\nthree\\\\nfour\\\\nfive\\\\n'\n '''\n python_shell = _python_shell_default(python_shell,\n kwargs.get('__pub_jid', ''))\n stderr = subprocess.STDOUT if redirect_stderr else subprocess.PIPE\n ret = _run(cmd,\n runas=runas,\n group=group,\n cwd=cwd,\n stdin=stdin,\n stderr=stderr,\n shell=shell,\n python_shell=python_shell,\n env=env,\n clean_env=clean_env,\n prepend_path=prepend_path,\n template=template,\n rstrip=rstrip,\n umask=umask,\n output_encoding=output_encoding,\n output_loglevel=output_loglevel,\n log_callback=log_callback,\n timeout=timeout,\n reset_system_locale=reset_system_locale,\n ignore_retcode=ignore_retcode,\n saltenv=saltenv,\n use_vt=use_vt,\n password=password,\n encoded_cmd=encoded_cmd,\n success_retcodes=success_retcodes,\n success_stdout=success_stdout,\n success_stderr=success_stderr,\n **kwargs)\n\n if hide_output:\n ret['stdout'] = ret['stderr'] = ''\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
    # -- tail of get_new_service_instance_stub (its head is in the preceding
    #    chunk) --
    # On Python >= 2.7.9 the default SSL context verifies certs and hostnames;
    # relax both so the new stub can talk to hosts with self-signed certs.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Reuse the already-authenticated session cookie from the live connection.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub


def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is optional.
    '''
    # NOTE(review): the default '<unnamed>' is truthy, so this fallback only
    # fires when the caller explicitly passes a falsy name -- confirm intended.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub, so
    # it rides on the same authenticated session.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si


def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server
    and False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. 
    '''
    return host_reference.config.network.vnic


def _get_vnic_manager(host_reference):
    '''
    Helper function that returns a list of Virtual NicManagers and their
    information.
    '''
    return host_reference.configManager.virtualNicManager


def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    # Linear scan over the DVS's portgroups; first name match wins.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup

    return None


def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    # NOTE(review): identical to _get_dvs_portgroup -- no filtering to uplink
    # portgroups is performed here; confirm whether that was intended.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup

    return None


def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # The SOAP login expects the token base64-encoded.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # in_token is never reassigned, so a step that produces no token and
        # no established context always raises here on the first iteration.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')


def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type

    service_instance
        The service instance
object to get hardware info for .. versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] 
hw_grain_data['ip4_interfaces'][_vnic.device] = [] hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its datastore system managed object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']


def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The datastore is detached via the datastore system
    of the first host attached to it.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Removal is performed through the first attached host's datastore system
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)


def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts


def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an
    ESXi host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr


def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all scsi luns (vim.ScsiLun objects) on a host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # Retrieve storage system if not provided
        si = get_service_instance_from_managed_object(host_ref,
                                                      name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []


def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    # Map lun keys to the lun objects themselves, then join on the key
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}


def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default
        value is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default
        value is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter on, nothing to return
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
            si, host_ref, storage_system, hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks


def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s', hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Find the disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system,
                                         disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info


def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk attached to an ESXi host.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s', hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)


def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups


def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    if not disk_group.ssd.canonicalName == cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd]
    if sorted(non_ssd_disks) != sorted(capacity_disk_ids):
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids)))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True


# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the
    specified host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If
        None, it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Look up the cache configuration manager through the host
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]


# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If
        None, it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name,
              swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True


def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)


def get_resource_pools(service_instance, resource_pool_names,
                       datacenter_name=None, get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # NOTE(review): this formats 'selected_pools' (always [] here) into
        # the message; presumably 'resource_pool_names' was meant — confirm
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                selected_pools, get_all_resource_pools))
    return selected_pools


def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)


def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)


def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)


def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)


def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed
        virtual switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)


def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)


def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed
        virtual switches.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)


def wait_for_task(task, instance_name, task_type, sleep_seconds=1,
                  log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging
        purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole second since start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so it can be
        # translated into the matching salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)


def get_vm_by_property(service_instance, name, datacenter=None,
                       vm_properties=None, traversal_spec=None,
                       parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and
    properties list, returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance,
                                                      datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(
        service_instance, vim.VirtualMachine, vm_properties,
        container_ref=parent_ref, traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # NOTE(review): the two adjacent literals concatenate without a
        # space ("...with thesame name...") — confirm intended wording
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return vm_formatted[0]


def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Use the parent folder of the base (clone source) VM
        vm_object = get_vm_by_property(service_instance, base_vm_name,
                                       vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(
            vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(
            service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(
            service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(
            datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The datacenter vm folder object is not defined')
    # NOTE(review): if no branch above matches (no base_vm_name, no 'folder'
    # key, falsy datacenter) this raises NameError on folder_object — confirm
    # callers always satisfy one of the branches
    return folder_object


def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied,
    we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance,
                                 datacenter_name=datacenter,
                                 host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone-host lookup failed; traverse up to the cluster's
            # resource pool instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(
                service_instance, vim.ResourcePool,
                container_ref=host_objects[0],
                property_list=['name'],
                traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(
                        placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(
            service_instance, [placement['resourcepool']],
            datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # NOTE(review): message references placement['host'], which may
            # be absent in this branch (KeyError) — confirm intended key
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified host {}.'.format(placement['host'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object,
                                     placement['cluster'])
        clus_props = \
            get_properties_of_managed_object(cluster_object,
                                             properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should
        # never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)


def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as
        GiB = 1024MiB

    size
        Number which represents the size
    '''
    if unit.lower() == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif unit.lower() == 'mb':
        target_size = int(size * 1024)
    elif unit.lower() == 'kb':
        target_size = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': target_size, 'unit': 'KB'}


def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine),
                      task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine


def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object,
              host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(task if False else vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object


def register_vm(datacenter, name, vmx_path, resourcepool_object,
                host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file,
    on success it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool
        object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(
                path=vmx_path,
                name=name,
                asTemplate=False,
                host=host_object,
                pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(
                path=vmx_path,
                name=name,
                asTemplate=False,
                pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref


def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref


def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')


def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # NOTE(review): log message says "Destroying" but this only unregisters
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # NOTE(review): unlike the sibling handlers in this file, these two
        # do not call log.exception before re-raising — confirm intentional
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
_get_service_instance
python
def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. ''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance
Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L182-L278
[ "def get_gssapi_token(principal, host, domain):\n '''\n Get the gssapi token for Kerberos connection\n\n principal\n The service principal\n host\n Host url where we would like to authenticate\n domain\n Kerberos user domain\n '''\n\n if not HAS_GSSAPI:\n raise ImportError('The gssapi library is not imported.')\n\n service = '{0}/{1}@{2}'.format(principal, host, domain)\n log.debug('Retrieving gsspi token for service %s', service)\n service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)\n ctx = gssapi.InitContext(service_name)\n in_token = None\n while not ctx.established:\n out_token = ctx.step(in_token)\n if out_token:\n if six.PY2:\n return base64.b64encode(out_token)\n return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))\n if ctx.established:\n break\n if not in_token:\n raise salt.exceptions.CommandExecutionError(\n 'Can\\'t receive token, no response from server')\n raise salt.exceptions.CommandExecutionError(\n 'Context established, but didn\\'t receive token')\n" ]
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' 
inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. 
''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance 
object to get hardware info for .. versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] 
hw_grain_data['ip4_interfaces'][_vnic.device] = [] hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder, then its children
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # Parent is a distributed virtual switch: traverse its portgroups
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    matches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            matches.append(entry['object'])
    return matches


def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference.

    Raises ``VMwareObjectRetrievalError`` when no uplink portgroup exists.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup is identified by the SYSTEM/DVS.UPLINKPG tag
        if entry['tag'] and \
                [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']:
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores
    visible from a VMware object, filtered by their names, or the
    backing disk cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)', obj_name, datastore_names,
                  backing_disk_ids)
        # Backing-disk filtering requires host-level storage info, so the
        # reference must be a host when that filter is set
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the requested name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        # Nothing left to match against; avoid an unfiltered retrieval
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    # Apply the (possibly disk-extended) name filter client-side
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
''' if not hostname: hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.datastoreSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostDatastoreSystem, property_list=['datastore'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' datastore system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved datastore system', hostname) return objs[0]['object'] def remove_datastore(service_instance, datastore_ref): ''' Creates a VMFS datastore from a disk_id service_instance The Service Instance Object containing the datastore datastore_ref The reference to the datastore to remove ''' ds_props = get_properties_of_managed_object( datastore_ref, ['host', 'info', 'name']) ds_name = ds_props['name'] log.debug('Removing datastore \'%s\'', ds_name) ds_hosts = ds_props.get('host') if not ds_hosts: raise salt.exceptions.VMwareApiError( 'Datastore \'{0}\' can\'t be removed. No ' 'attached hosts found'.format(ds_name)) hostname = get_managed_object_name(ds_hosts[0].key) host_ds_system = get_host_datastore_system(ds_hosts[0].key, hostname=hostname) try: host_ds_system.RemoveDatastore(datastore_ref) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name) def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' Returns a list of vim.HostSystem objects representing ESXi hosts in a vcenter filtered by their names and/or datacenter, cluster membership. service_instance The Service Instance Object from which to obtain the hosts. datacenter_name The datacenter name. Default is None. host_names The host_names to be retrieved. Default is None. cluster_name The cluster name - used to restrict the hosts retrieved. Only used if the datacenter is set. This argument is optional. get_all_hosts Specifies whether to retrieve all hosts in the container. Default value is False. ''' properties = ['name'] if cluster_name and not datacenter_name: raise salt.exceptions.ArgumentValueError( 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
        Default value is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter by: short-circuit with an empty result
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si,
                                                                host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    # Keep only scsi disks that match any of the requested filters
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s', hostname,
              [d.canonicalName for d in scsi_disks])
    return scsi_disks


def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk
    (a vim.HostDiskPartitionInfo object).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s', hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info


def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk in a vcenter filtered by their names
    and/or datacenter, cluster membership

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
    check fails
    '''
    # The cache disk of a disk group is unique, so a simple equality check
    # suffices
    if not disk_group.ssd.canonicalName == cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Capacity disks must match exactly (order-insensitive)
    non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd]
    if sorted(non_ssd_disks) != sorted(capacity_disk_ids):
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids)))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True


# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the
    specified host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If
        None, it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager passed in: traverse from the host to its cache
        # configuration manager and read the cache info from there
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]


# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore opject representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If
        None, it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task finishes (raises on task error)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True


def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_customizationspec_ref
python
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Fetch the customization spec with the given name, for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see
        get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up through the vCenter's customization spec manager and
    # hand the retrieved item straight back to the caller.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L281-L293
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, 
username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. # If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. 
Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. 
''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. 
''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. ''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. 
''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. 
versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] hw_grain_data['ip4_interfaces'][_vnic.device] = [] 
hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
''' if not hostname: hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.datastoreSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostDatastoreSystem, property_list=['datastore'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' datastore system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved datastore system', hostname) return objs[0]['object'] def remove_datastore(service_instance, datastore_ref): ''' Creates a VMFS datastore from a disk_id service_instance The Service Instance Object containing the datastore datastore_ref The reference to the datastore to remove ''' ds_props = get_properties_of_managed_object( datastore_ref, ['host', 'info', 'name']) ds_name = ds_props['name'] log.debug('Removing datastore \'%s\'', ds_name) ds_hosts = ds_props.get('host') if not ds_hosts: raise salt.exceptions.VMwareApiError( 'Datastore \'{0}\' can\'t be removed. No ' 'attached hosts found'.format(ds_name)) hostname = get_managed_object_name(ds_hosts[0].key) host_ds_system = get_host_datastore_system(ds_hosts[0].key, hostname=hostname) try: host_ds_system.RemoveDatastore(datastore_ref) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name) def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' Returns a list of vim.HostSystem objects representing ESXi hosts in a vcenter filtered by their names and/or datacenter, cluster membership. service_instance The Service Instance Object from which to obtain the hosts. datacenter_name The datacenter name. Default is None. host_names The host_names to be retrieved. Default is None. cluster_name The cluster name - used to restrict the hosts retrieved. Only used if the datacenter is set. This argument is optional. get_all_hosts Specifies whether to retrieve all hosts in the container. Default value is False. ''' properties = ['name'] if cluster_name and not datacenter_name: raise salt.exceptions.ArgumentValueError( 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_service_instance
python
def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. # If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. 
Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance
Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi``
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L318-L411
[ "def _get_service_instance(host, username, password, protocol,\n port, mechanism, principal, domain):\n '''\n Internal method to authenticate with a vCenter server or ESX/ESXi host\n and return the service instance object.\n '''\n log.trace('Retrieving new service instance')\n token = None\n if mechanism == 'userpass':\n if username is None:\n raise salt.exceptions.CommandExecutionError(\n 'Login mechanism userpass was specified but the mandatory '\n 'parameter \\'username\\' is missing')\n if password is None:\n raise salt.exceptions.CommandExecutionError(\n 'Login mechanism userpass was specified but the mandatory '\n 'parameter \\'password\\' is missing')\n elif mechanism == 'sspi':\n if principal is not None and domain is not None:\n try:\n token = get_gssapi_token(principal, host, domain)\n except Exception as exc:\n raise salt.exceptions.VMwareConnectionError(six.text_type(exc))\n else:\n err_msg = 'Login mechanism \\'{0}\\' was specified but the' \\\n ' mandatory parameters are missing'.format(mechanism)\n raise salt.exceptions.CommandExecutionError(err_msg)\n else:\n raise salt.exceptions.CommandExecutionError(\n 'Unsupported mechanism: \\'{0}\\''.format(mechanism))\n try:\n log.trace('Connecting using the \\'%s\\' mechanism, with username \\'%s\\'',\n mechanism, username)\n service_instance = SmartConnect(\n host=host,\n user=username,\n pwd=password,\n protocol=protocol,\n port=port,\n b64token=token,\n mechanism=mechanism)\n except TypeError as exc:\n if 'unexpected keyword argument' in exc.message:\n log.error('Initial connect to the VMware endpoint failed with %s', exc.message)\n log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')\n log.error('We recommend updating to that version or later.')\n raise\n except Exception as exc: # pylint: disable=broad-except\n # pyVmomi's SmartConnect() actually raises Exception in some cases.\n default_msg = 'Could not connect to host \\'{0}\\'. 
' \\\n 'Please check the debug log for more information.'.format(host)\n\n try:\n if (isinstance(exc, vim.fault.HostConnectFault) and\n '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \\\n '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):\n service_instance = SmartConnect(\n host=host,\n user=username,\n pwd=password,\n protocol=protocol,\n port=port,\n sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),\n b64token=token,\n mechanism=mechanism)\n else:\n log.exception(exc)\n err_msg = exc.msg if hasattr(exc, 'msg') else default_msg\n raise salt.exceptions.VMwareConnectionError(err_msg)\n except Exception as exc: # pylint: disable=broad-except\n # pyVmomi's SmartConnect() actually raises Exception in some cases.\n if 'certificate verify failed' in six.text_type(exc):\n context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\n context.verify_mode = ssl.CERT_NONE\n try:\n service_instance = SmartConnect(\n host=host,\n user=username,\n pwd=password,\n protocol=protocol,\n port=port,\n sslContext=context,\n b64token=token,\n mechanism=mechanism\n )\n except Exception as exc:\n log.exception(exc)\n err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)\n raise salt.exceptions.VMwareConnectionError(\n 'Could not connect to host \\'{0}\\': '\n '{1}'.format(host, err_msg))\n else:\n err_msg = exc.msg if hasattr(exc, 'msg') else default_msg\n log.trace(exc)\n raise salt.exceptions.VMwareConnectionError(err_msg)\n atexit.register(Disconnect, service_instance)\n return service_instance\n" ]
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. 
''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. 
''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. ''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. 
''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. 
versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] hw_grain_data['ip4_interfaces'][_vnic.device] = [] 
hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
    '''
    # Get list of all managed object references with specified property
    object_list = get_mors_with_properties(service_instance,
                                           object_type,
                                           property_list=[property_name],
                                           container_ref=container_ref)

    for obj in object_list:
        # Besides matching on the requested property, also accept the textual
        # managed object id (str() of the mor with its surrounding quotes
        # stripped) as the lookup value.
        obj_id = six.text_type(obj.get('object', '')).strip('\'"')
        if obj[property_name] == property_value or property_value == obj_id:
            return obj['object']
    # No managed object matched the requested property value
    return None


def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references
    for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # The server dropped the HTTP connection; retry the request once
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Only a broken pipe is treated as transient (retried once);
        # any other IOError is re-raised
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    # Flatten each returned ObjectContent's propSet into a plain dict,
    # keeping the managed object reference itself under the 'object' key
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list


def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    # Best-effort retrieval of the object's name; it is only used for
    # logging and error messages below
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Not every managed object type exposes a 'name' property
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]


def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss
    )
    properties = ['name']
    # Traverse from the datacenter through its networkFolder into the
    # folder's children, where the DVS objects are found; the datacenter
    # and folder themselves are skipped from the results
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Keep every DVS when get_all_dvss is set, otherwise only those whose
    # name is in dvs_names (an empty/None dvs_names yields an empty list)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items


def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow the datacenter's networkFolder property and return the folder
    # object itself (skip=False)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']


def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
''' if not hostname: hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.datastoreSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostDatastoreSystem, property_list=['datastore'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' datastore system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved datastore system', hostname) return objs[0]['object'] def remove_datastore(service_instance, datastore_ref): ''' Creates a VMFS datastore from a disk_id service_instance The Service Instance Object containing the datastore datastore_ref The reference to the datastore to remove ''' ds_props = get_properties_of_managed_object( datastore_ref, ['host', 'info', 'name']) ds_name = ds_props['name'] log.debug('Removing datastore \'%s\'', ds_name) ds_hosts = ds_props.get('host') if not ds_hosts: raise salt.exceptions.VMwareApiError( 'Datastore \'{0}\' can\'t be removed. No ' 'attached hosts found'.format(ds_name)) hostname = get_managed_object_name(ds_hosts[0].key) host_ds_system = get_host_datastore_system(ds_hosts[0].key, hostname=hostname) try: host_ds_system.RemoveDatastore(datastore_ref) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name) def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' Returns a list of vim.HostSystem objects representing ESXi hosts in a vcenter filtered by their names and/or datacenter, cluster membership. service_instance The Service Instance Object from which to obtain the hosts. datacenter_name The datacenter name. Default is None. host_names The host_names to be retrieved. Default is None. cluster_name The cluster name - used to restrict the hosts retrieved. Only used if the datacenter is set. This argument is optional. get_all_hosts Specifies whether to retrieve all hosts in the container. Default value is False. ''' properties = ['name'] if cluster_name and not datacenter_name: raise salt.exceptions.ArgumentValueError( 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_new_service_instance_stub
python
def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub
Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L414-L454
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. 
:param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. ''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = 
gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') 
hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] hw_grain_data['ip4_interfaces'][_vnic.device] = [] hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. 
''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
''' if not hostname: hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.datastoreSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostDatastoreSystem, property_list=['datastore'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' datastore system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved datastore system', hostname) return objs[0]['object'] def remove_datastore(service_instance, datastore_ref): ''' Creates a VMFS datastore from a disk_id service_instance The Service Instance Object containing the datastore datastore_ref The reference to the datastore to remove ''' ds_props = get_properties_of_managed_object( datastore_ref, ['host', 'info', 'name']) ds_name = ds_props['name'] log.debug('Removing datastore \'%s\'', ds_name) ds_hosts = ds_props.get('host') if not ds_hosts: raise salt.exceptions.VMwareApiError( 'Datastore \'{0}\' can\'t be removed. No ' 'attached hosts found'.format(ds_name)) hostname = get_managed_object_name(ds_hosts[0].key) host_ds_system = get_host_datastore_system(ds_hosts[0].key, hostname=hostname) try: host_ds_system.RemoveDatastore(datastore_ref) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name) def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' Returns a list of vim.HostSystem objects representing ESXi hosts in a vcenter filtered by their names and/or datacenter, cluster membership. service_instance The Service Instance Object from which to obtain the hosts. datacenter_name The datacenter name. Default is None. host_names The host_names to be retrieved. Default is None. cluster_name The cluster name - used to restrict the hosts retrieved. Only used if the datacenter is set. This argument is optional. get_all_hosts Specifies whether to retrieve all hosts in the container. Default value is False. ''' properties = ['name'] if cluster_name and not datacenter_name: raise salt.exceptions.ArgumentValueError( 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_service_instance_from_managed_object
python
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si
Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L457-L472
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # Recursive (True) container view over the whole inventory; linear scan
    # for the first object whose name matches.
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item

    return None


def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or
        ESX/ESXi host is not using the default protocol. Default protocol is
        ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi
        host is not using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVmomi's cached service instance when it points at the same
    # host:port and we are not running inside a proxy minion.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired server-side: drop it and log in again once.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance


def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Propagate the existing authenticated session cookie to the new stub so
    # no re-login is required.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub


def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server
    and False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        # Any other apiType is unexpected; surface it instead of guessing.
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))


def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder,
                                                              [vim.DistributedVirtualSwitch],
                                                              True)
        for item in container.view:
            if item.name == dvs_name:
                return item

    return None


def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.
    '''
    return host_reference.config.network.pnic


def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.
''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance 
object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains are only meaningful for a direct ESXi (HostAgent)
    # connection; vCenter connections return an empty dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the single HostSystem of the ESXi host.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn = host + '.' + domain, skipping the dot when no domain set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the container view reference when done.
        view = None
    return hw_grain_data


def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    return service_instance.RetrieveContent()


def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service
    Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view created above; only needed when we built the
    # container view ourselves.
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content


def get_mor_by_property(service_instance, object_type, property_value,
                        property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property
    value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object
        reference.

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.
    '''
    # Get list of all managed object references with specified property
    object_list = get_mors_with_properties(service_instance,
                                           object_type,
                                           property_list=[property_name],
                                           container_ref=container_ref)
    for obj in object_list:
        # Also match on the stringified MOR id (quotes stripped), so callers
        # may pass either the property value or the object id.
        obj_id = six.text_type(obj.get('object', '')).strip('\'"')
        if obj[property_name] == property_value or property_value == obj_id:
            return obj['object']

    return None


def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for
    the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Stale HTTP connection: retry once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        # Broken pipe: retry once.
        content = get_content(*content_args, **content_kwargs)

    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        # Keep the managed object reference itself under the 'object' key.
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list


def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First retrieve the object's name (best effort) for log/error messages.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]


def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    props = get_properties_of_managed_object(mo_ref, ['name'])
    return props.get('name')


def get_network_adapter_type(adapter_type):
    '''
    Return the network adapter type.

    adapter_type
        The adapter type from which to obtain the network adapter type.
    '''
    if adapter_type == 'vmxnet':
        return vim.vm.device.VirtualVmxnet()
    elif adapter_type == 'vmxnet2':
        return vim.vm.device.VirtualVmxnet2()
    elif adapter_type == 'vmxnet3':
        return vim.vm.device.VirtualVmxnet3()
    elif adapter_type == 'e1000':
        return vim.vm.device.VirtualE1000()
    elif adapter_type == 'e1000e':
        return vim.vm.device.VirtualE1000e()

    raise ValueError('An unknown network adapter object type name.')


def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Subclasses are checked before their base classes (e.g. Vmxnet2 before
    # Vmxnet, E1000e before E1000) so isinstance picks the most specific type.
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'

    raise ValueError('An unknown network adapter object type.')


def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss
    )
    properties = ['name']
    # Traverse datacenter -> networkFolder -> childEntity to reach DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items


def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']


def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual porgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the dvss to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items


def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the 'SYSTEM/DVS.UPLINKPG' tag.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]


def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs)

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions.
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
''' if not hostname: hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.datastoreSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostDatastoreSystem, property_list=['datastore'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' datastore system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved datastore system', hostname) return objs[0]['object'] def remove_datastore(service_instance, datastore_ref): ''' Creates a VMFS datastore from a disk_id service_instance The Service Instance Object containing the datastore datastore_ref The reference to the datastore to remove ''' ds_props = get_properties_of_managed_object( datastore_ref, ['host', 'info', 'name']) ds_name = ds_props['name'] log.debug('Removing datastore \'%s\'', ds_name) ds_hosts = ds_props.get('host') if not ds_hosts: raise salt.exceptions.VMwareApiError( 'Datastore \'{0}\' can\'t be removed. No ' 'attached hosts found'.format(ds_name)) hostname = get_managed_object_name(ds_hosts[0].key) host_ds_system = get_host_datastore_system(ds_hosts[0].key, hostname=hostname) try: host_ds_system.RemoveDatastore(datastore_ref) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name) def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' Returns a list of vim.HostSystem objects representing ESXi hosts in a vcenter filtered by their names and/or datacenter, cluster membership. service_instance The Service Instance Object from which to obtain the hosts. datacenter_name The datacenter name. Default is None. host_names The host_names to be retrieved. Default is None. cluster_name The cluster name - used to restrict the hosts retrieved. Only used if the datacenter is set. This argument is optional. get_all_hosts Specifies whether to retrieve all hosts in the container. Default value is False. ''' properties = ['name'] if cluster_name and not datacenter_name: raise salt.exceptions.ArgumentValueError( 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
disconnect
python
def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg)
Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L475-L495
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . 
Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. ''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. 
''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. 
versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] hw_grain_data['ip4_interfaces'][_vnic.device] = [] 
hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.
    '''
    # Only datacenters are supported as the parent of standard switch
    # networks.
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> child entities to reach the
    # network objects.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items


def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    items = []
    item_list = get_mors_with_properties(service_instance, vim_object,
                                         properties)
    for item in item_list:
        items.append(item['name'])
    return items


def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    vim faults are translated into VMwareApiError/VMwareRuntimeError.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to query the vCenter

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. Optional; looked up from ``host_ref`` when omitted.

    Raises VMwareObjectRetrievalError if the storage system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']


def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The vim.HostStorageSystem to query

    device_path
        The device path whose partition info is retrieved
    '''
    try:
        # RetrieveDiskPartitionInfo takes a list of paths; we query one and
        # return its single result
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system, device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The vim.HostStorageSystem of the target host

    device_path
        Path of the device on which the partition is created

    partition_info
        Current vim.HostDiskPartitionInfo of the device
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We only support adding a partition at the end of the disk, after the
    # existing partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # BUGFIX: was log.trace('... = {0}', ...) which mixes str.format
    # placeholders with logging's %-style lazy args and never interpolates
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)


def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Optional; retrieved when omitted.

    Returns the vim.Datastore reference of the created datastore.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id,
              vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system, target_disk.devicePath, partition_info)
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'',
              datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError if the datastore system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']


def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore, using the datastore system of the first host
    the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError when the datastore has no attached hosts.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is performed through the first attached host's
    # datastore system
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts


def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an
    ESXi host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr


def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []


def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    # Join the two maps on the lun key
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default
        value is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default
        value is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter on -> no results
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s', hostname,
              [d.canonicalName for d in scsi_disks])
    return scsi_disks


def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s', hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow the devices down to the requested scsi disk
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info


def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s', hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # Nothing to filter on -> no results
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups


def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails

    disk_group
        The vim.VsanHostDiskMapping to check

    cache_disk_id
        Expected canonical name of the disk group's cache disk

    capacity_disk_ids
        Expected canonical names of the disk group's capacity disks
        (order-insensitive)
    '''
    if not disk_group.ssd.canonicalName == cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd]
    if sorted(non_ssd_disks) != sorted(capacity_disk_ids):
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids)))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True


# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the
    specified host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore opject representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True


def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)


def get_resource_pools(service_instance, resource_pool_names,
                       datacenter_name=None, get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # BUGFIX: previously formatted `selected_pools` (always empty here)
        # into the message instead of the names that were requested
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools


def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)


def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)


def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)


def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)


def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)


def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)


def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1,
                  log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging
        purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary so time_counter tracks
        # elapsed wall-clock seconds
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so it can be
        # translated into the appropriate salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)


def get_vm_by_property(service_instance, name, datacenter=None,
                       vm_properties=None, traversal_spec=None,
                       parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and property
    list, returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance,
                                                      datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(
        service_instance,
        vim.VirtualMachine,
        vm_properties,
        container_ref=parent_ref,
        traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # BUGFIX: the two literals were implicitly concatenated into a single
        # list element, producing "...with thesame name..."; join the parts
        # with an explicit comma so the space is inserted
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
is_connection_to_a_vcenter
python
def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type))
Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L498-L527
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. ''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. 
''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. 
versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] hw_grain_data['ip4_interfaces'][_vnic.device] = [] 
hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing config instead of
        # replacing it wholesale
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Reconfiguration is asynchronous; block until the task completes
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')


def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)


def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}

    # Summary values are str()-converted and stripped of single quotes for
    # safe downstream display
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # summary sizes are in bytes; report MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key stringifies like "vim.HostSystem:host-123"; keep the
        # moid part after the colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items


def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    # Linear scan of a recursive (True) container view; returns the first
    # object whose name matches, or None
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item
    return None


def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    # Same scan as get_mor_by_name, but matched on the managed object id
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item._moId == obj_moid:
            return item
    return None


def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
    datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem)

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host; looked up from host_ref when not supplied.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)

    # Traverse from the host to its configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']


def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions.
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']


def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore, via the datastore system of the first host
    attached to it

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        # Removal must go through an attached host's datastore system,
        # so at least one attached host is required
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions.
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name) def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' Returns a list of vim.HostSystem objects representing ESXi hosts in a vcenter filtered by their names and/or datacenter, cluster membership. service_instance The Service Instance Object from which to obtain the hosts. datacenter_name The datacenter name. Default is None. host_names The host_names to be retrieved. Default is None. cluster_name The cluster name - used to restrict the hosts retrieved. Only used if the datacenter is set. This argument is optional. get_all_hosts Specifies whether to retrieve all hosts in the container. Default value is False. ''' properties = ['name'] if cluster_name and not datacenter_name: raise salt.exceptions.ArgumentValueError( 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []


def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # First map scsi address -> lun key, then lun key -> lun object,
    # and compose the two into scsi address -> lun object
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}


def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks in a ESXi
    host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default
        value is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved.
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_service_info
python
def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg)
Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L530-L549
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. ''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. 
''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. 
versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] hw_grain_data['ip4_interfaces'][_vnic.device] = [] 
hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Fall back to a default create spec; make sure the config spec carries
    # the requested switch name.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    # The DVS is created inside the datacenter's network folder.
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    # Translate pyVmomi faults into salt exceptions so callers only have to
    # handle the salt exception hierarchy.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the vCenter-side task completes (raises on task failure).
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    # Translate pyVmomi faults into salt exceptions so callers only have to
    # handle the salt exception hierarchy.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the vCenter-side task completes (raises on task failure).
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
''' if not hostname: hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.datastoreSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostDatastoreSystem, property_list=['datastore'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' datastore system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved datastore system', hostname) return objs[0]['object'] def remove_datastore(service_instance, datastore_ref): ''' Creates a VMFS datastore from a disk_id service_instance The Service Instance Object containing the datastore datastore_ref The reference to the datastore to remove ''' ds_props = get_properties_of_managed_object( datastore_ref, ['host', 'info', 'name']) ds_name = ds_props['name'] log.debug('Removing datastore \'%s\'', ds_name) ds_hosts = ds_props.get('host') if not ds_hosts: raise salt.exceptions.VMwareApiError( 'Datastore \'{0}\' can\'t be removed. No ' 'attached hosts found'.format(ds_name)) hostname = get_managed_object_name(ds_hosts[0].key) host_ds_system = get_host_datastore_system(ds_hosts[0].key, hostname=hostname) try: host_ds_system.RemoveDatastore(datastore_ref) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name) def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' Returns a list of vim.HostSystem objects representing ESXi hosts in a vcenter filtered by their names and/or datacenter, cluster membership. service_instance The Service Instance Object from which to obtain the hosts. datacenter_name The datacenter name. Default is None. host_names The host_names to be retrieved. Default is None. cluster_name The cluster name - used to restrict the hosts retrieved. Only used if the datacenter is set. This argument is optional. get_all_hosts Specifies whether to retrieve all hosts in the container. Default value is False. ''' properties = ['name'] if cluster_name and not datacenter_name: raise salt.exceptions.ArgumentValueError( 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
_get_dvs
python
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(
            inventory.rootFolder,
            [vim.DistributedVirtualSwitch],
            True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # ContainerView is a server-side managed object; destroy it so we
            # don't leak a view on the vCenter/ESXi host (the original code
            # never destroyed it, including on the early return above).
            container.Destroy()
    return None
Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L552-L568
[ "def list_dvs(service_instance):\n '''\n Returns a list of distributed virtual switches associated with a given service instance.\n\n service_instance\n The Service Instance Object from which to obtain distributed virtual switches.\n '''\n return list_objects(service_instance, vim.DistributedVirtualSwitch)\n", "def get_inventory(service_instance):\n '''\n Return the inventory of a Service Instance Object.\n\n service_instance\n The Service Instance Object for which to obtain inventory.\n '''\n return service_instance.RetrieveContent()\n" ]
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. ''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. 
''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. 
versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] hw_grain_data['ip4_interfaces'][_vnic.device] = [] 
hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items


def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    items = []
    item_list = get_mors_with_properties(service_instance, vim_object,
                                         properties)
    for item in item_list:
        items.append(item['name'])
    return items


def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''

    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager


def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''

    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager


def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance.
        If not provided it will be retrieved.
    '''

    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance.
        If not provided it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The label makes the description visible in the vSphere client.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid) query must yield at most one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]


def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        # NOTE(review): the next two handlers don't log.exception() first,
        # unlike every other handler in this module -- presumably an
        # oversight; confirm before changing.
        except vim.fault.VimFault as exc:
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def list_datacenters(service_instance):
    '''
    Returns a list of datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)


def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items


def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name
    '''
    items = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return items[0]


def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    ..
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Return the vim.HostStorageSystem of an ESXi host.

    service_instance
        The Service Instance Object used to query the vCenter.

    host_ref
        Reference to the ESXi host whose storage system is retrieved.

    hostname
        Name of the host; looked up from ``host_ref`` when not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host object to its configManager.storageSystem
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.storageSystem',
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return results[0]['object']
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        Path of the disk device the partition is added to.

    partition_info
        Existing vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition at the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # BUGFIX: was '{0}' (str.format placeholder), which %-style lazy logging
    # never interpolates; the value was silently dropped from the message
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Return the vim.HostDatastoreSystem of an ESXi host.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; looked up from ``host_ref`` when not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host object to its configManager.datastoreSystem
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.datastoreSystem',
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    # A cluster filter is only meaningful inside a datacenter
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            # ('parent' is needed below to check each host's cluster)
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Keep only hosts whose parent is the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Build a map of all vim.ScsiLun objects on an ESXi host, keyed by the
    scsi address of each lun.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                        storage_system,
                                                        hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> vim.ScsiLun object
    scsi_addr_map = {}
    for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr):
        scsi_addr_map[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_map
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validate that the disks in a disk group are as expected.

    disk_group
        Disk group object exposing ``ssd`` (cache disk) and ``nonSsd``
        (capacity disks) members.

    cache_disk_id
        Expected canonical name of the cache disk.

    capacity_disk_ids
        Expected canonical names of the capacity disks.

    Raises an ``ArgumentValueError`` when a check fails; returns True
    otherwise.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Compare the capacity disk sets order-insensitively
    actual_capacity = sorted([d.canonicalName for d in disk_group.nonSsd])
    expected_capacity = sorted(capacity_disk_ids)
    if actual_capacity != expected_capacity:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity, expected_capacity))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
def get_resource_pools(service_instance, resource_pool_names,
                       datacenter_name=None, get_all_resource_pools=False):
    '''
    Retrieves resource pool objects, filtered by name and/or datacenter.

    service_instance
        The service instance object to query the vCenter.

    resource_pool_names
        Resource pool names to retrieve. May be empty when
        ``get_all_resource_pools`` is True.

    datacenter_name
        Name of the datacenter where the resource pool is available.
        Default is None (search from the root folder).

    get_all_resource_pools
        Boolean; when True, every resource pool in the container is returned.

    return
        List of vim.ResourcePool managed object references.

    Raises ``VMwareObjectRetrievalError`` when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Restrict the search to the datacenter when one was given
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # BUGFIX: the error previously formatted `selected_pools` (always []
        # at this point) instead of the names that were actually requested
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
def wait_for_task(task, instance_name, task_type, sleep_seconds=1,
                  log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging
        purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task's result on success; on failure, re-raises the task's
    error translated into the corresponding salt VMware exception.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info can itself raise API faults; translate them
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every `sleep_seconds` iterations to limit log noise
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary so time_counter tracks
        # elapsed wall-clock seconds
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so it can be
        # translated into the matching salt exception type
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first fault message, when present, for more context
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
_get_dvs_portgroup
python
def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None
Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L593-L605
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. 
''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. 
versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] hw_grain_data['ip4_interfaces'][_vnic.device] = [] 
hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
    '''
    # Get list of all managed object references with specified property
    object_list = get_mors_with_properties(service_instance,
                                           object_type,
                                           property_list=[property_name],
                                           container_ref=container_ref)
    for obj in object_list:
        # Also match on the moid string (the quoted repr of the object),
        # so callers may pass either the property value or the object id
        obj_id = six.text_type(obj.get('object', '')).strip('\'"')
        if obj[property_name] == property_value or property_value == obj_id:
            return obj['object']
    # No managed object matched the requested property value
    return None


def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for
    the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
    '''
    props = get_properties_of_managed_object(mo_ref, ['name'])
    # Returns None (rather than raising) when the 'name' property is absent
    return props.get('name')


def get_network_adapter_type(adapter_type):
    '''
    Return the network adapter type.

    adapter_type
        The adapter type from which to obtain the network adapter type.
    '''
    if adapter_type == 'vmxnet':
        return vim.vm.device.VirtualVmxnet()
    elif adapter_type == 'vmxnet2':
        return vim.vm.device.VirtualVmxnet2()
    elif adapter_type == 'vmxnet3':
        return vim.vm.device.VirtualVmxnet3()
    elif adapter_type == 'e1000':
        return vim.vm.device.VirtualE1000()
    elif adapter_type == 'e1000e':
        return vim.vm.device.VirtualE1000e()

    raise ValueError('An unknown network adapter object type name.')


def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # NOTE(review): the more specific vmxnet2/vmxnet3 checks run before the
    # plain vmxnet check — presumably because those classes subclass
    # VirtualVmxnet in pyVmomi; confirm before reordering.
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'

    raise ValueError('An unknown network adapter object type.')


def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> child entities to find networks
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items


def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    items = []
    item_list = get_mors_with_properties(service_instance, vim_object, properties)
    for item in item_list:
        items.append(item['name'])
    return items


def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter; blocks until the reconfigure
    task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the entire cluster configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')


def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)


def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)


def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)


def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores_list = list_objects(service_instance, vim.Datastore)
    datastores = {}
    # One extra lookup per datastore; expensive on large inventories
    for datastore in datastores_list:
        datastores[datastore] = list_datastore_full(service_instance,
                                                    datastore)
    return datastores


def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object,
                                       datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(datobj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(
                task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Best-effort: a datastore without the directory is skipped,
            # not an error
            pass
    return files


def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores
    visible from a VMware object, filtered by their names, or the backing
    disk canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores to be
        retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)', obj_name, datastore_names,
                  backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if
                    e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the caller-provided name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the '
                  'datastores backed by the disk id(s) \'%s\'',
                  backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like
        # the default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of
        # Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or
             i['name'] in datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]


def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
    # Compute the spec for a new vmfs partition occupying the free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'',
              datastore_name, hostname)
    return ds_ref


def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem)

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']


def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore by unmounting it from the first attached host.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through any one attached host's datastore system
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)


def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster
    membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists.
            # Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                # Standalone host; cannot belong to the requested cluster
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts


def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an
    ESXi host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr


def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s', hostname,
                  [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []


def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    # Join the two maps on the lun key to produce scsi_address -> ScsiLun
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}


def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default
        value is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved.
        Default value is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter by and not retrieving everything
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si,
                                                                host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s', hostname,
              [d.canonicalName for d in scsi_disks])
    return scsi_disks


def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(
        storage_system, ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s', hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the scsi disk matching the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system,
                                         disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info


def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk attached to an ESXi host.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s', hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the scsi disk matching the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)


def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache ssd's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups


def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
_get_dvs_uplink_portgroup
python
def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None
Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L608-L620
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
    type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # Recursive container view rooted at the inventory root folder; return the
    # first object whose name matches, or None when nothing matches.
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item
    return None


def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi
        host is not using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi
        host is not using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''

    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Re-use a cached service instance when one exists and still points at the
    # same host; otherwise drop it and reconnect.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance


def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Re-use the existing authenticated session cookie so the new stub does not
    # need to log in again.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub


def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si


def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))


def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch matches
    '''
    # Only walk the inventory when the name is known to exist.
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        for item in container.view:
            if item.name == dvs_name:
                return item
    return None


def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.
    '''
    return host_reference.config.network.pnic


def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.
    '''
    return host_reference.config.network.vnic


def _get_vnic_manager(host_reference):
    '''
    Helper function that returns a list of Virtual NicManagers
    and their information.
    '''
    return host_reference.configManager.virtualNicManager


def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None


def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain
    '''

    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Drive the GSSAPI handshake. in_token stays None here because no
        # server response is fed back into the loop; the first out_token is
        # returned base64-encoded.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')


def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    ..
versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only ESXi host connections (HostAgent) expose this hardware info.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is expressed in MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): rebinding drops the local reference but does not call
        # Destroy() on the container view; the view may linger server-side.
        view = None
    return hw_grain_data


def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    return service_instance.RetrieveContent()


def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions.
Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content


def get_mor_by_property(service_instance, object_type, property_value,
                        property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.
    '''
    # Get list of all managed object references with specified property
    object_list = get_mors_with_properties(service_instance,
                                           object_type,
                                           property_list=[property_name],
                                           container_ref=container_ref)
    for obj in object_list:
        # Also match against the stringified MOR id (quotes stripped) so a
        # caller can pass the object id instead of the property value.
        obj_id = six.text_type(obj.get('object', '')).strip('\'"')
        if obj[property_name] == property_value or property_value == obj_id:
            return obj['object']
    return None


def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the
    managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # The connection was dropped mid-response; retry once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        # Broken pipe; retry once.
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a plain dict, keeping the managed
    # object reference itself under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list


def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved optimally.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    # First fetch the object's name, used only for error/log messages.
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]


def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # Reach DVSs through the datacenter's network folder children.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items


def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter

    dc_ref
        The datacenter reference.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']


def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    # dvs_name is only applied when no configSpec was supplied; a
    # caller-provided configSpec is used as-is.
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the create task completes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task completes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref, (vim.Datacenter,
                                    vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Reach portgroups through the datacenter's network folder children.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)

    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items


def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the SYSTEM/DVS.UPLINKPG tag.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]


def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs)

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions.
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager


def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # Label shown by the vSphere client next to the license key
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed typo in trace message (was 'Unexpectectedly')
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]


def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Consistency fix: log the fault like every other handler
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            # Consistency fix: log the fault like every other handler
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)


def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter as vim.Datacenter objects.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    # Keep objects whose retrieved 'name' property matches the filter (or
    # everything when get_all_datacenters is set)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items


def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError if no datacenter with that name exists.
    '''
    items = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return items[0]


def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns the new
    vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj


def get_cluster(dc_ref, cluster):
    '''
    Returns a vim.ClusterComputeResource object for a cluster in a
    datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse one level of
    # child entities to reach them
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return items[0]


def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration finishes (raises on task failure)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')


def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)


def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)


def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)


def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores_list = list_objects(service_instance, vim.Datastore)
    datastores = {}
    for datastore in datastores_list:
        datastores[datastore] = list_datastore_full(service_instance,
                                                    datastore)
    return datastores


def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore,
                                       datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are reported by the API in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Robustness fix: inaccessible/unmounted datastores can report a capacity
    # of 0, which previously raised ZeroDivisionError
    if items['capacity']:
        items['usage'] = \
            (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key stringifies to something like 'vim.HostSystem:host-123';
        # the moid is the part after the first colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem,
                                      host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns None if no object with that name exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Linear scan of the container view; first name match wins
    for item in container.view:
        if item.name == obj_name:
            return item
    return None


def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.
    Returns None if no object with that moid exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Linear scan of the container view; first moid match wins
    for item in container.view:
        if item._moId == obj_moid:
            return item
    return None


def get_datastore_files(service_instance, directory, datastores,
                        container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object,
                                       datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(datobj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(
                salt.utils.vmware.wait_for_task(
                    task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the requested directory is simply skipped
            pass
    return files


def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores to be
        retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)', obj_name, datastore_names,
                  backing_disk_ids)
    # The backing-disk filter only makes sense for a host reference
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the '
                  'datastores backed by the disk id(s) \'%s\'',
                  backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like
        # the default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of
        # Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items
             if get_all_datastores or i['name'] in datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]


def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; retrieved from host_ref if not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']


def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the device to inspect.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]


def _get_new_computed_partition_spec(storage_system, device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the device on which the partition is computed.

    partition_info
        Existing partition info (vim.HostDiskPartitionInfo) of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fix: the original used a '{0}' str.format placeholder with logging's
    # %-style lazy arguments, so the value was never interpolated
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id and returns the new vim.Datastore
    reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system (vim.HostStorageSystem); retrieved from
        host_ref if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id,
              vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    # Compute a spec that turns the free space at the end of the disk into a
    # new vmfs partition
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system, target_disk.devicePath, partition_info)
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'',
              datastore_name, hostname)
    return ds_ref


def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']


def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. (The original docstring said "Creates a VMFS
    datastore from a disk_id" — a copy-paste error.)

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the datastore system of the first
    # attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only
            # makes sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts


def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an
    ESXi host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr


def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []


def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # First map scsi address -> lun key, then join with the lun objects
    # keyed by lun key
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_gssapi_token
python
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    :return: The base64-encoded GSSAPI token produced by the first
        successful context step (``bytes`` on both Python 2 and 3).
    :raises ImportError: If the optional ``gssapi`` dependency is not
        available (``HAS_GSSAPI`` is False).
    :raises salt.exceptions.CommandExecutionError: If no token could be
        obtained from the security context.
    '''
    # gssapi is an optional dependency of this module; fail with a clear
    # message instead of a NameError on the gssapi calls below.
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    # Kerberos service principal of the form 'principal/host@DOMAIN'
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    # Fixed typo in log message: 'gsspi' -> 'gssapi'
    log.debug('Retrieving gssapi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # One handshake step; a non-empty output token means the client
        # credentials were accepted and the token can be handed back.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            # On Python 3 the token must be coerced to bytes before
            # base64-encoding.
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned, so this branch always
        # fires when a step yields no token -- the loop effectively runs a
        # single iteration. Kept as-is to preserve existing behavior.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L623-L655
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. 
''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. 
versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] hw_grain_data['ip4_interfaces'][_vnic.device] = [] 
hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
'''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # DVSs live under the datacenter's network folder; traverse
    # networkFolder -> childEntity to reach them
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items


def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter

    dc_ref
        The datacenter reference.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                      vim.Folder,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']


def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
'''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal spec when the caller didn't provide one; the DVS
    # name in the config spec is always forced to dvs_name
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
'''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual porgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the dvss to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref, (vim.Datacenter,
                                    vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # Portgroups are reached differently depending on the parent type:
    # datacenter -> networkFolder -> childEntity, dvs -> portgroup
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items


def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    items = []
    item_list = get_mors_with_properties(service_instance,
                                         vim_object,
                                         properties)
    for item in item_list:
        items.append(item['name'])
    return items


def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
'''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager


def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager


def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the well-known vSphere client label
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not
    provided, then entity_name is assumed to be the vcenter. This is later
    checked if the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself. Default is None.

    entity_name
        Entity name used in logging. Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved. Default is None.
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def list_datacenters(service_instance):
    '''
    Returns a list of datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)


def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items


def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name
    '''
    items = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return items[0]


def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    ..
versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj


def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return items[0]


def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
'''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')


def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)


def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
'''
    return list_objects(service_instance, vim.StoragePod)


def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)


def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores_list = list_objects(service_instance, vim.Datastore)
    datastores = {}
    # Resolve the details for every datastore name (one API round-trip each)
    for datastore in datastores_list:
        datastores[datastore] = list_datastore_full(service_instance,
                                                    datastore)
    return datastores


def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
'''
    datastore_object = get_mor_by_name(service_instance,
                                       vim.Datastore,
                                       datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are reported by the API in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key stringifies as "'vim.HostSystem:host-123'"; keep only
        # the moid part after the colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance,
                                      vim.HostSystem,
                                      host_key)
        items['hosts'].append(host_object.name)

    return items


def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server
        (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item
    return None


def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server
        (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item._moId == obj_moid:
            return item
    return None


def get_datastore_files(service_instance, directory, datastores,
                        container_object, browser_spec):
    '''
    Get
the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object,
                                       datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(datobj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(
                task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Directory doesn't exist on this datastore; simply skip it
            pass
    return files


def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores
    visible from a VMware object, filtered by their names, or the backing
    disk canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the '
                  'datastores backed by the disk id(s) \'%s\'',
                  backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like
        # the default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of
        # Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if
             get_all_datastores or i['name'] in datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]


def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host owning the disk.

    device_path
        Path of the disk device on which the new partition is computed.

    partition_info
        Existing vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We only support adding one partition at the end of the disk, using up
    # the remaining free space
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # BUG FIX: the original passed a '{0}' str.format placeholder to the
    # logger's lazy %-style formatting, so the literal text '{0}' was logged;
    # '%s' is the correct placeholder here.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore by unmounting it from its first attached host.

    NOTE: the original docstring ("Creates a VMFS datastore from a disk_id")
    was a copy/paste error from create_vmfs_datastore; this function removes
    a datastore.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts (removal is
    performed through a host's datastore system, so at least one is needed).
    '''
    # Pull name and attached host list in one property-collector round trip
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Removal only needs to be issued through one attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a mapping from scsi address to the vim.ScsiLun object it refers to
    on an ESXi host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Retrieved from the host when omitted.

    hostname
        Name of the host. Looked up from ``host_ref`` when omitted.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                        storage_system,
                                                        hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> lun object
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default
        value is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default
        value is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # NOTE(review): the early return only applies when get_all_disks is
        # False — this matches the upstream layout; confirm against the
        # original indentation.
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si,
                                                                host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    # A disk is kept when retrieving everything, or when its canonical name
    # or its lun key (derived from the requested scsi addresses) matches
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s', hostname,
              [d.canonicalName for d in scsi_disks])
    return scsi_disks
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    ArgumentValueError exceptions if the check fails.

    (The original docstring referred to a non-existent "CheckError"; the
    code raises salt.exceptions.ArgumentValueError.)

    disk_group
        The vim.VsanHostDiskMapping to validate.

    cache_disk_id
        Expected canonical name of the disk group's cache (ssd) disk.

    capacity_disk_ids
        Expected canonical names of the capacity (non-ssd) disks; order
        does not matter, the lists are compared sorted.

    Returns True when both checks pass.
    '''
    if not disk_group.ssd.canonicalName == cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd]
    if sorted(non_ssd_disks) != sorted(capacity_disk_ids):
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids)))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
def get_resource_pools(service_instance, resource_pool_names,
                       datacenter_name=None, get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names to retrieve

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True, return every resource pool in the container

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter when one is given, otherwise
    # search from the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # BUG FIX: the original formatted `selected_pools` (always [] on this
        # path) into the message instead of the requested names
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_folders(service_instance):
    '''
    Lists every folder visible through the given service instance.

    service_instance
        The Service Instance Object to query.
    '''
    return list_objects(service_instance, vim.Folder)


def list_dvs(service_instance):
    '''
    Lists every distributed virtual switch visible through the given
    service instance.

    service_instance
        The Service Instance Object to query.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)


def list_vapps(service_instance):
    '''
    Lists every vApp visible through the given service instance.

    service_instance
        The Service Instance Object to query.
    '''
    return list_objects(service_instance, vim.VirtualApp)


def list_portgroups(service_instance):
    '''
    Lists every distributed virtual portgroup visible through the given
    service instance.

    service_instance
        The Service Instance Object to query.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_hardware_grains
python
def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} 
hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] hw_grain_data['ip4_interfaces'][_vnic.device] = [] hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data
Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance object to get hardware info for .. versionadded:: 2016.11.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L658-L719
[ "def get_inventory(service_instance):\n '''\n Return the inventory of a Service Instance Object.\n\n service_instance\n The Service Instance Object for which to obtain inventory.\n '''\n return service_instance.RetrieveContent()\n" ]
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        # Basic auth: both credentials are mandatory.
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        # Kerberos/SSPI: a GSSAPI token is generated up front and passed to
        # SmartConnect below.
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older PyVmomi releases don't accept the b64token/mechanism kwargs;
        # detect that case and tell the user to upgrade.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # NOTE(review): on certificate verification failure this retries
            # with an *unverified* SSL context (self-signed ESXi certs) --
            # confirm this relaxation is still intended.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context',
                                       getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # NOTE(review): last-resort fallback forces TLSv1 with
                # CERT_NONE, disabling certificate validation entirely.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:  # pylint: disable=broad-except
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the vCenter/ESXi session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance


def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name)
    return customization_spec_name


def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    # Linear scan of the container view; returns the first name match.
    for item in container.view:
        if item.name == obj_name:
            return item
    return None


def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse a cached service instance when one exists and targets this host.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        # NOTE(review): hostname checking and cert verification are disabled
        # for the re-targeted stub -- mirrors the primary connection behavior.
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Propagate the existing session cookie so the new stub shares the
    # already-authenticated vCenter session.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub


def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub,
    # i.e. the same authenticated connection.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si


def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' => vCenter Server, 'HostAgent' => standalone ESXi host.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))


def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    # First confirm the name exists, then scan a container view for it.
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder,
                                                              [vim.DistributedVirtualSwitch],
                                                              True)
        for item in container.view:
            if item.name == dvs_name:
                return item
    return None


def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.
    '''
    return host_reference.config.network.pnic


def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.
''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. 
''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
    '''

    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager


def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''

    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager


def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''

    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the vSphere client's license label.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)


def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, as vim.Datacenter objects.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items


def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    Raises VMwareObjectRetrievalError if the datacenter is not found.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name
    '''
    items = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return items[0]


def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj


def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    Raises VMwareObjectRetrievalError if the cluster is not found.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse one level of
    # child entities to reach them
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return items[0]


def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter; blocks until the reconfigure task
    completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')


def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)


def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)


def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)


def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores_list = list_objects(service_instance, vim.Datastore)
    datastores = {}
    for datastore in datastores_list:
        datastores[datastore] = list_datastore_full(service_instance,
                                                    datastore)
    return datastores


def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts
    (capacity/free/used are in MiB, usage is a percentage)

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
'''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']


def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts/deletes) a datastore via the datastore system of the
    first host attached to it.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)


def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster
    membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only
            # makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)

        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue

        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue

        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts


def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on
    an ESXi host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr


def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref,
                                                      name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_root_folder
python
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        root = service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return root
Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L732-L752
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. 
''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance 
object to get hardware info for .. versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] 
hw_grain_data['ip4_interfaces'][_vnic.device] = [] hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. 
Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. ''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. ''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. 
container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. ''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. 
''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. ''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. 
    '''
    # The more specific adapter classes are tested before the generic ones
    # so the most specific type name wins (NOTE(review): vmxnet2/vmxnet3
    # appear to subclass VirtualVmxnet in pyVmomi -- confirm)
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'

    raise ValueError('An unknown network adapter object type.')


def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items


def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError if the folder can't be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']


def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'',
              dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    # The name in the spec always wins; force it to the requested name
    dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits
    for the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 
'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. 
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items


def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    items = []
    item_list = get_mors_with_properties(service_instance, vim_object,
                                         properties)
    for item in item_list:
        items.append(item['name'])
    return items


def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager


def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareObjectRetrievalError if the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager


def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not
        provided it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not
        provided it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # The 'VpxClientLicenseLabel' label is what the vSphere client displays
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not
    provided, then entity_name is assumed to be the vcenter. This is later
    checked if the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj


def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises VMwareObjectRetrievalError if the cluster doesn't exist.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach clusters
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return items[0]


def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and waits for the reconfiguration
    task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec as an incremental change
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')


def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)


def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore
        clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)


def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)


def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores_list = list_objects(service_instance, vim.Datastore)
    datastores = {}
    for datastore in datastores_list:
        datastores[datastore] = list_datastore_full(service_instance,
                                                    datastore)
    return datastores


def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given
    datastore: name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object,
                                       datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(datobj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(
                task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # The directory doesn't exist on this datastore; skip it
            pass
    return files


def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores
    visible from a VMware object, filtered by their names, or the backing
    disk cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)', obj_name, datastore_names,
                  backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent
                    if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # NOTE(review): this mutates the caller's datastore_names list
            # in place -- confirm callers don't reuse it
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the '
                  'datastores backed by the disk id(s) \'%s\'',
                  backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks
        # like the default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of
        # Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or
             i['name'] in datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]


def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host that owns the disk.

    device_path
        Path of the disk device on which the partition is added.

    partition_info
        Existing vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: this trace previously used a str.format-style '{0}' placeholder;
    # logging performs printf-style ('%s') lazy interpolation, so the value
    # was never substituted into the message.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        # NOTE(review): 'VMwareNotFoundError' differs from the
        # 'VMwareObjectNotFoundError' raised above -- confirm it exists in
        # salt.exceptions before relying on this path.
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError if the datastore system managed
    object cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem child
    # managed object in a single property-collector query.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    # Exactly one datastore system is expected per host
    return objs[0]['object']
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Cluster filter: the host's parent must be the named cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            # All hosts requested: no name filtering needed
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # Storage system not supplied by the caller; retrieve it now
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    # scsiLun holds every lun on the host; an empty/absent list is not an
    # error here -- an empty list is returned instead
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when no devices are found on the host
    or when the named disk is not among them.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s', hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow the lun list down to the disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # The partition layout is looked up by device path via the storage system
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # Nothing requested and not fetching everything: nothing to do
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the
    specified host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager supplied: traverse from the host to its cache
        # configuration manager and fetch its config info in one query
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration is returned (single datastore)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # Manager supplied by the caller: read its properties directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
def get_resource_pools(service_instance, resource_pool_names,
                       datacenter_name=None, get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference

    Raises VMwareObjectRetrievalError if no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fixed: the error previously formatted 'selected_pools' -- always an
        # empty list on this path -- instead of the names that were requested,
        # producing a useless 'names=[]' message.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
def get_vm_by_property(service_instance, name, datacenter=None,
                       vm_properties=None, traversal_spec=None,
                       parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties
    list, returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches ``name`` and
    VMwareMultipleObjectsError when more than one matches.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance,
                                                      datacenter)
    if not vm_properties:
        # Default property set covering identity, hardware, storage and state
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(
        service_instance,
        vim.VirtualMachine,
        vm_properties,
        container_ref=parent_ref,
        traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: the message was built from two adjacent string literals with
        # no separating space, producing '...with thesame name...'; the
        # surrounding ' '.join on a single-element list could not repair it.
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_content
python
def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. ''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content
Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L755-L874
[ "def get_root_folder(service_instance):\n '''\n Returns the root folder of a vCenter.\n\n service_instance\n The Service Instance Object for which to obtain the root folder.\n '''\n try:\n log.trace('Retrieving root folder')\n return service_instance.RetrieveContent().rootFolder\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n" ]
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. 
''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance 
object to get hardware info for .. versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] 
hw_grain_data['ip4_interfaces'][_vnic.device] = [] hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. ''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. 
container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. ''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. 
''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. ''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. 
''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. ''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], 
traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. ''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. 
''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 
'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. 
''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license manager') try: lic_manager = service_instance.content.licenseManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return lic_manager def get_license_assignment_manager(service_instance): ''' Returns the license assignment manager. service_instance The Service Instance Object from which to obrain the license manager. ''' log.debug('Retrieving license assignment manager') try: lic_assignment_manager = \ service_instance.content.licenseManager.licenseAssignmentManager except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not lic_assignment_manager: raise salt.exceptions.VMwareObjectRetrievalError( 'License assignment manager was not retrieved') return lic_assignment_manager def get_licenses(service_instance, license_manager=None): ''' Returns the licenses on a specific instance. service_instance The Service Instance Object from which to obrain the licenses. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) log.debug('Retrieving licenses') try: return license_manager.licenses except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
''' if not hostname: hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.datastoreSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostDatastoreSystem, property_list=['datastore'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' datastore system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved datastore system', hostname) return objs[0]['object'] def remove_datastore(service_instance, datastore_ref): ''' Creates a VMFS datastore from a disk_id service_instance The Service Instance Object containing the datastore datastore_ref The reference to the datastore to remove ''' ds_props = get_properties_of_managed_object( datastore_ref, ['host', 'info', 'name']) ds_name = ds_props['name'] log.debug('Removing datastore \'%s\'', ds_name) ds_hosts = ds_props.get('host') if not ds_hosts: raise salt.exceptions.VMwareApiError( 'Datastore \'{0}\' can\'t be removed. No ' 'attached hosts found'.format(ds_name)) hostname = get_managed_object_name(ds_hosts[0].key) host_ds_system = get_host_datastore_system(ds_hosts[0].key, hostname=hostname) try: host_ds_system.RemoveDatastore(datastore_ref) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name) def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' Returns a list of vim.HostSystem objects representing ESXi hosts in a vcenter filtered by their names and/or datacenter, cluster membership. service_instance The Service Instance Object from which to obtain the hosts. datacenter_name The datacenter name. Default is None. host_names The host_names to be retrieved. Default is None. cluster_name The cluster name - used to restrict the hosts retrieved. Only used if the datacenter is set. This argument is optional. get_all_hosts Specifies whether to retrieve all hosts in the container. Default value is False. ''' properties = ['name'] if cluster_name and not datacenter_name: raise salt.exceptions.ArgumentValueError( 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_mor_by_property
python
def get_mor_by_property(service_instance, object_type, property_value,
                        property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name``
    matches ``property_value`` (or whose MOID string matches it), or
    ``None`` when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value to match against the named property (or the object's
        stringified MOID).

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.
    '''
    # Fetch every candidate of the requested type, retrieving only the
    # single property we need to compare against.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified MOR looks like 'vim.X:"moid"'; strip the quote
        # characters so a raw MOID can also be matched.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], moid):
            return candidate['object']
    return None
Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L877-L906
[ "def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n" ]
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # Retry once with certificate verification disabled when the
            # failure is an SSL verification error (self-signed certificates
            # are common on ESXi hosts).
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context',
                                       getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: build an explicit TLSv1 context with
                # verification turned off and try one more time.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the interpreter exits.
    atexit.register(Disconnect, service_instance)
    return service_instance


def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # NOTE: the parameter variable is reused to hold the retrieved spec object.
    customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name)
    return customization_spec_name


def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # NOTE(review): the container view created here is never Destroy()ed.
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item
    return None


def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi
        host is not using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi
        host is not using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''

    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse a cached service instance when one exists and points at the same
    # host; otherwise invalidate it and reconnect.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance


def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path, created from an existing
    connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # Reuse the session cookie of the existing connection so the new stub is
    # authenticated under the same vCenter session.
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub


def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub
    # (and therefore its authenticated session).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si


def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server
    and False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))


def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        # NOTE(review): the container view created here is never Destroy()ed.
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        for item in container.view:
            if item.name == dvs_name:
                return item
    return None


def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.
    '''
    return host_reference.config.network.pnic


def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.
    '''
    return host_reference.config.network.vnic


def _get_vnic_manager(host_reference):
    '''
    Helper function that returns a list of Virtual NicManagers
    and their information.
    '''
    return host_reference.configManager.virtualNicManager


def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup

    return None


def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    # NOTE(review): this body is identical to _get_dvs_portgroup - it looks up
    # by name only and does not check that the portgroup is an uplink
    # portgroup; confirm callers rely on name matching alone.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup

    return None


def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain
    '''

    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): in_token is never reassigned, so the 'if not in_token'
    # branch fires on the first iteration that produces no out_token - the
    # loop effectively performs a single GSSAPI step.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')


def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is
    a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only ESXi hosts (HostAgent) expose per-host hardware information.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(
            service_instance.RetrieveContent().rootFolder,
            [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize divided down twice - presumably bytes to MiB; confirm
            # against the vSphere API docs.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)

            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac

            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): only the local reference is dropped here; the server
        # side view is not Destroy()ed.
        view = None
    return hw_grain_data


def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    return service_instance.RetrieveContent()


def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content


def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the
    managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Retry once on a dropped HTTP connection.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Retry only on a broken pipe; re-raise any other I/O error.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a plain dict, keeping the managed
    # object reference under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list


def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an optimal
    way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch the object's name so error messages can identify it.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]


def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    props = get_properties_of_managed_object(mo_ref, ['name'])
    return props.get('name')


def get_network_adapter_type(adapter_type):
    '''
    Return the network adapter type.

    adapter_type
        The adapter type from which to obtain the network adapter type.
    '''
    if adapter_type == 'vmxnet':
        return vim.vm.device.VirtualVmxnet()
    elif adapter_type == 'vmxnet2':
        return vim.vm.device.VirtualVmxnet2()
    elif adapter_type == 'vmxnet3':
        return vim.vm.device.VirtualVmxnet3()
    elif adapter_type == 'e1000':
        return vim.vm.device.VirtualE1000()
    elif adapter_type == 'e1000e':
        return vim.vm.device.VirtualE1000e()

    raise ValueError('An unknown network adapter object type name.')


def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Check subclasses before their base classes (Vmxnet2/3 derive from
    # Vmxnet, E1000e from E1000) so isinstance matches the exact type.
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'

    raise ValueError('An unknown network adapter object type.')


def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items


def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']


def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the server-side task completes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref, (vim.Datacenter,
                                    vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Traverse datacenter -> networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items


def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system tag
    # 'SYSTEM/DVS.UPLINKPG' on the portgroup's 'tag' property.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]


def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs)

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # NOTE(review): 'portgrouo' below is a typo in the trace message;
    # preserved here since this edit is documentation-only.
    log.trace('Updating portgrouo %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))


def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))


def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items


def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    items = []
    item_list = get_mors_with_properties(service_instance, vim_object, properties)
    for item in item_list:
        items.append(item['name'])
    return items


def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''

    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager


def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''

    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager


def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''

    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''

    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # Attach the human-readable description via the well-known client label.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
''' if not hostname: hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.datastoreSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostDatastoreSystem, property_list=['datastore'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' datastore system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved datastore system', hostname) return objs[0]['object'] def remove_datastore(service_instance, datastore_ref): ''' Creates a VMFS datastore from a disk_id service_instance The Service Instance Object containing the datastore datastore_ref The reference to the datastore to remove ''' ds_props = get_properties_of_managed_object( datastore_ref, ['host', 'info', 'name']) ds_name = ds_props['name'] log.debug('Removing datastore \'%s\'', ds_name) ds_hosts = ds_props.get('host') if not ds_hosts: raise salt.exceptions.VMwareApiError( 'Datastore \'{0}\' can\'t be removed. No ' 'attached hosts found'.format(ds_name)) hostname = get_managed_object_name(ds_hosts[0].key) host_ds_system = get_host_datastore_system(ds_hosts[0].key, hostname=hostname) try: host_ds_system.RemoveDatastore(datastore_ref) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name) def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' Returns a list of vim.HostSystem objects representing ESXi hosts in a vcenter filtered by their names and/or datacenter, cluster membership. service_instance The Service Instance Object from which to obtain the hosts. datacenter_name The datacenter name. Default is None. host_names The host_names to be retrieved. Default is None. cluster_name The cluster name - used to restrict the hosts retrieved. Only used if the datacenter is set. This argument is optional. get_all_hosts Specifies whether to retrieve all hosts in the container. Default value is False. ''' properties = ['name'] if cluster_name and not datacenter_name: raise salt.exceptions.ArgumentValueError( 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_mors_with_properties
python
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the
    managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    def _retrieve_content():
        # Single point of retrieval so every retry path below issues the
        # exact same query.
        return get_content(service_instance, object_type,
                           property_list=property_list,
                           container_ref=container_ref,
                           traversal_spec=traversal_spec,
                           local_properties=local_properties)

    try:
        content = _retrieve_content()
    except BadStatusLine:
        # Stale HTTP connection; retry once on a fresh one.
        content = _retrieve_content()
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        # Broken pipe (EPIPE); retry once as well.
        content = _retrieve_content()

    object_list = []
    for obj in content:
        # Flatten each returned property set into a plain dict and keep the
        # managed object reference itself under the 'object' key.
        properties = {prop.name: prop.val for prop in obj.propSet}
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L909-L960
[ "def get_content(service_instance, obj_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns the content of the specified type of object for a Service Instance.\n\n For more information, please see:\n http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html\n\n service_instance\n The Service Instance from which to obtain content.\n\n obj_type\n The type of content to obtain.\n\n property_list\n An optional list of object properties to used to return even more filtered content results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec.\n\n local_properties\n Flag specifying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Start at the rootFolder if container starting point not specified\n if not container_ref:\n container_ref = get_root_folder(service_instance)\n\n # By default, the object reference used as the starting poing for the filter\n # is the container_ref passed in the function\n obj_ref = container_ref\n local_traversal_spec = False\n if not traversal_spec and not local_properties:\n local_traversal_spec = True\n # We don't have a specific traversal spec override so we are going to\n # get everything using a container view\n try:\n obj_ref = service_instance.content.viewManager.CreateContainerView(\n container_ref, [obj_type], True)\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n\n # Create 'Traverse All' traversal spec to determine the path for\n # collection\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n name='traverseEntities',\n path='view',\n skip=False,\n type=vim.view.ContainerView\n )\n\n # Create property spec to determine properties to be retrieved\n property_spec = vmodl.query.PropertyCollector.PropertySpec(\n type=obj_type,\n all=True if not property_list else False,\n pathSet=property_list\n )\n\n # Create object spec to navigate content\n obj_spec = vmodl.query.PropertyCollector.ObjectSpec(\n obj=obj_ref,\n skip=True if not local_properties else False,\n selectSet=[traversal_spec] if not local_properties else None\n )\n\n # Create a filter spec and specify object, property spec in it\n filter_spec = vmodl.query.PropertyCollector.FilterSpec(\n objectSet=[obj_spec],\n propSet=[property_spec],\n reportMissingObjectsInResults=False\n )\n\n # Retrieve the contents\n try:\n content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n\n # Destroy the object view\n if local_traversal_spec:\n try:\n obj_ref.Destroy()\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n\n return content\n" ]
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. 
''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance 
object to get hardware info for .. versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] 
hw_grain_data['ip4_interfaces'][_vnic.device] = [] hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
'''
    # Get list of all managed object references with specified property
    object_list = get_mors_with_properties(service_instance,
                                           object_type,
                                           property_list=[property_name],
                                           container_ref=container_ref)
    for obj in object_list:
        # Also match the stringified moref id (e.g. 'vim.HostSystem:host-12')
        obj_id = six.text_type(obj.get('object', '')).strip('\'"')
        if obj[property_name] == property_value or property_value == obj_id:
            return obj['object']
    return None


def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Retrieve the object's name first, purely for friendlier error messages
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Not every managed object type exposes a 'name' property
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]


def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    props = get_properties_of_managed_object(mo_ref, ['name'])
    return props.get('name')


def get_network_adapter_type(adapter_type):
    '''
    Return the network adapter type.

    adapter_type
        The adapter type from which to obtain the network adapter type.
''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. ''' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2): return 'vmxnet2' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3): return 'vmxnet3' if isinstance(adapter_object, vim.vm.device.VirtualVmxnet): return 'vmxnet' if isinstance(adapter_object, vim.vm.device.VirtualE1000e): return 'e1000e' if isinstance(adapter_object, vim.vm.device.VirtualE1000): return 'e1000' raise ValueError('An unknown network adapter object type.') def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): ''' Returns distributed virtual switches (DVSs) in a datacenter. dc_ref The parent datacenter reference. dvs_names The names of the DVSs to return. Default is None. get_all_dvss Return all DVSs in the datacenter. Default is False. 
''' dc_name = get_managed_object_name(dc_ref) log.trace( 'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s', dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss ) properties = ['name'] traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) service_instance = get_service_instance_from_managed_object(dc_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualSwitch, container_ref=dc_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_dvss or (dvs_names and i['name'] in dvs_names)] return items def get_network_folder(dc_ref): ''' Retrieves the network folder of a datacenter ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving network folder in datacenter \'%s\'', dc_name) service_instance = get_service_instance_from_managed_object(dc_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=False, type=vim.Datacenter) entries = get_mors_with_properties(service_instance, vim.Folder, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. 
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items 
= [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. 
network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. ''' if not isinstance(parent_ref, vim.Datacenter): raise salt.exceptions.ArgumentValueError( 'Parent has to be a datacenter.') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', ' 'get_all_networks=%s', type(parent_ref).__name__, parent_name, ','.join(network_names) if network_names else None, get_all_networks) properties = ['name'] service_instance = get_service_instance_from_managed_object(parent_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Network, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_networks or (network_names and i['name'] in network_names)] return items def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. service_instance The Service Instance for which to obtain a list of objects. object_type The type of content for which to obtain information. properties An optional list of object properties used to return reference results. If not provided, defaults to ``name``. ''' if properties is None: properties = ['name'] items = [] item_list = get_mors_with_properties(service_instance, vim_object, properties) for item in item_list: items.append(item['name']) return items def get_license_manager(service_instance): ''' Returns the license manager. service_instance The Service Instance Object from which to obrain the license manager. 
'''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager


def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager


def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def add_license(service_instance, key, description, license_manager=None): ''' Adds a license. service_instance The Service Instance Object. key The key of the license to add. description The description of the license to add. license_manager The License Manager object of the service instance. If not provided it will be retrieved. ''' if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() label.key = 'VpxClientLicenseLabel' label.value = description log.debug('Adding license \'%s\'', description) try: vmware_license = license_manager.AddLicense(key, [label]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Returns the licenses assigned to an entity. If entity ref is not provided, then entity_name is assumed to be the vcenter. This is later checked if the entity name is provided. service_instance The Service Instance Object from which to obtain the licenses. entity_ref VMware entity to get the assigned licenses for. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved. Default is None. 
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def list_datacenters(service_instance):
    '''
    Returns a list of datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)


def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items


def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name
    '''
    items = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return items[0]


def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    ..
versionadded:: 2017.7.0 service_instance The Service Instance Object datacenter_name The datacenter name ''' root_folder = get_root_folder(service_instance) log.trace('Creating datacenter \'%s\'', datacenter_name) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj def get_cluster(dc_ref, cluster): ''' Returns a cluster in a datacenter. dc_ref The datacenter reference cluster The cluster to be retrieved ''' dc_name = get_managed_object_name(dc_ref) log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'', cluster, dc_name) si = get_service_instance_from_managed_object(dc_ref, name=dc_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='hostFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) items = [i['object'] for i in get_mors_with_properties(si, vim.ClusterComputeResource, container_ref=dc_ref, property_list=['name'], traversal_spec=traversal_spec) if i['name'] == cluster] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Cluster \'{0}\' was not found in datacenter ' '\'{1}\''. format(cluster, dc_name)) return items[0] def create_cluster(dc_ref, cluster_name, cluster_spec): ''' Creates a cluster in a datacenter. dc_ref The parent datacenter reference. cluster_name The cluster name. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. 
''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating cluster \'%s\' in datacenter \'%s\'', cluster_name, dc_name) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def update_cluster(cluster_ref, cluster_spec): ''' Updates a cluster in a datacenter. cluster_ref The cluster reference. cluster_spec The cluster spec (vim.ClusterConfigSpecEx). Defaults to None. ''' cluster_name = get_managed_object_name(cluster_ref) log.trace('Updating cluster \'%s\'', cluster_name) try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') def list_clusters(service_instance): ''' Returns a list of clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain clusters. ''' return list_objects(service_instance, vim.ClusterComputeResource) def list_datastore_clusters(service_instance): ''' Returns a list of datastore clusters associated with a given service instance. service_instance The Service Instance Object from which to obtain datastore clusters. 
''' return list_objects(service_instance, vim.StoragePod) def list_datastores(service_instance): ''' Returns a list of datastores associated with a given service instance. service_instance The Service Instance Object from which to obtain datastores. ''' return list_objects(service_instance, vim.Datastore) def list_datastores_full(service_instance): ''' Returns a list of datastores associated with a given service instance. The list contains basic information about the datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. ''' datastores_list = list_objects(service_instance, vim.Datastore) datastores = {} for datastore in datastores_list: datastores[datastore] = list_datastore_full(service_instance, datastore) return datastores def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. 
''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items def get_mor_by_name(si, obj_type, obj_name): ''' Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_mor_by_moid(si, obj_type, obj_moid): ''' Get reference to an object of specified object type and id si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_moid ID of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item._moId == obj_moid: return item return None def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec): ''' Get 
the files with a given browser specification from the datastore. service_instance The Service Instance Object from which to obtain datastores. directory The name of the directory where we would like to search datastores Name of the datastores container_object The base object for searches browser_spec BrowserSpec object which defines the search criteria return list of vim.host.DatastoreBrowser.SearchResults objects ''' files = [] datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores) for datobj in datastore_objects: try: task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory), searchSpec=browser_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files')) except salt.exceptions.VMwareFileNotFoundError: pass return files def get_datastores(service_instance, reference, datastore_names=None, backing_disk_ids=None, get_all_datastores=False): ''' Returns a list of vim.Datastore objects representing the datastores visible from a VMware object, filtered by their names, or the backing disk cannonical name or scsi_addresses service_instance The Service Instance Object from which to obtain datastores. reference The VMware object from which the datastores are visible. datastore_names The list of datastore names to be retrieved. Default value is None. backing_disk_ids The list of canonical names of the disks backing the datastores to be retrieved. Only supported if reference is a vim.HostSystem. 
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
''' if not hostname: hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.datastoreSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostDatastoreSystem, property_list=['datastore'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' datastore system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved datastore system', hostname) return objs[0]['object'] def remove_datastore(service_instance, datastore_ref): ''' Creates a VMFS datastore from a disk_id service_instance The Service Instance Object containing the datastore datastore_ref The reference to the datastore to remove ''' ds_props = get_properties_of_managed_object( datastore_ref, ['host', 'info', 'name']) ds_name = ds_props['name'] log.debug('Removing datastore \'%s\'', ds_name) ds_hosts = ds_props.get('host') if not ds_hosts: raise salt.exceptions.VMwareApiError( 'Datastore \'{0}\' can\'t be removed. No ' 'attached hosts found'.format(ds_name)) hostname = get_managed_object_name(ds_hosts[0].key) host_ds_system = get_host_datastore_system(ds_hosts[0].key, hostname=hostname) try: host_ds_system.RemoveDatastore(datastore_ref) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name) def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' Returns a list of vim.HostSystem objects representing ESXi hosts in a vcenter filtered by their names and/or datacenter, cluster membership. service_instance The Service Instance Object from which to obtain the hosts. datacenter_name The datacenter name. Default is None. host_names The host_names to be retrieved. Default is None. cluster_name The cluster name - used to restrict the hosts retrieved. Only used if the datacenter is set. This argument is optional. get_all_hosts Specifies whether to retrieve all hosts in the container. Default value is False. ''' properties = ['name'] if cluster_name and not datacenter_name: raise salt.exceptions.ArgumentValueError( 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes # sense if the datacenter has been specified properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) log.trace('Retrieved hosts: %s', [h['name'] for h in hosts]) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue if get_all_hosts: filtered_hosts.append(h['object']) continue if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts def _get_scsi_address_to_lun_key_map(service_instance, host_ref, storage_system=None, hostname=None): ''' Returns a map between the scsi addresses and the keys of all luns on an ESXi host. map[<scsi_address>] = <lun key> service_instance The Service Instance Object from which to obtain the hosts host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. Default is None. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_properties_of_managed_object
python
def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0]
Returns specific properties of a managed object, retrieved in an optimal way. mo_ref The managed object reference. properties List of properties of the managed object to retrieve.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L963-L996
[ "def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. 
If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n", "def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n" ]
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
    type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    # Walk a container view rooted at the inventory rootFolder and return the
    # first object whose name matches; None when nothing matched.
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item
    return None


def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi
        host is not using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi
        host is not using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVmomi's process-wide cached service instance when possible.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session went stale between calls: drop it and log in from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance


def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. 
    '''
    return host_reference.config.network.vnic


def _get_vnic_manager(host_reference):
    '''
    Helper function that returns a list of Virtual NicManagers
    and their information.
    '''
    return host_reference.configManager.virtualNicManager


def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None if no portgroup matched
    '''
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None


def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None if no portgroup matched
    '''
    # NOTE(review): this body is identical to _get_dvs_portgroup — it matches
    # by name only and does not restrict the search to uplink portgroups.
    # Confirm whether uplink-specific filtering was intended.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None


def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    # Build the service name as principal/host@DOMAIN and start a GSSAPI
    # security context negotiation.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # NOTE(review): in_token is never reassigned inside the loop, so the
        # first iteration either returns a base64 token or raises; the
        # 'if ctx.established: break' branch is effectively unreachable after
        # a falsy out_token with in_token still None.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')


def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is
    a HostAgent type

    service_instance
        The service instance
        object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only ESXi hosts (HostAgent) expose this hardware view; vCenter
    # connections return an empty grain dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # All values below come from the first (only) HostSystem in the view.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; convert to MiB for the grain.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
hw_grain_data['ip4_interfaces'][_vnic.device] = [] hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved.
    # When no property_list is given, retrieve all properties (all=True).
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view created above (only when this function created
    # it, i.e. no caller-supplied traversal spec was in play).
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content


def get_mor_by_property(service_instance, object_type, property_value,
                        property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property
    value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object
        reference.

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.
    '''
    # Get list of all managed object references with specified property
    object_list = get_mors_with_properties(service_instance,
                                           object_type,
                                           property_list=[property_name],
                                           container_ref=container_ref)
    for obj in object_list:
        # Also match against the stringified MOR id (quotes stripped), so
        # callers may pass either the property value or the object id.
        obj_id = six.text_type(obj.get('object', '')).strip('\'"')
        if obj[property_name] == property_value or property_value == obj_id:
            return obj['object']
    return None


def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references
    for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. ''' props = get_properties_of_managed_object(mo_ref, ['name']) return props.get('name') def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.') def get_network_adapter_object_type(adapter_object): ''' Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type. 
    '''
    # Order matters: Vmxnet2/Vmxnet3 subclass VirtualVmxnet and E1000e must be
    # tested before E1000, so the most specific classes are checked first.
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'
    raise ValueError('An unknown network adapter object type.')


def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items


def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                      vim.Folder,
                                      container_ref=dc_ref,
                                      property_list=['name'],
traversal_spec=traversal_spec) if not entries: raise salt.exceptions.VMwareObjectRetrievalError( 'Network folder in datacenter \'{0}\' wasn\'t retrieved' ''.format(dc_name)) return entries[0]['object'] def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): ''' Creates a distributed virtual switches (DVS) in a datacenter. Returns the reference to the newly created distributed virtual switch. dc_ref The parent datacenter reference. dvs_name The name of the DVS to create. dvs_create_spec The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. Default is None. ''' dc_name = get_managed_object_name(dc_ref) log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name) if not dvs_create_spec: dvs_create_spec = vim.DVSCreateSpec() if not dvs_create_spec.configSpec: dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() dvs_create_spec.configSpec.name = dvs_name netw_folder_ref = get_network_folder(dc_ref) try: task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. 
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # Pick the traversal path matching the parent: a datacenter is traversed
    # through its networkFolder children, a DVS directly through 'portgroup'.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items


def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by its SYSTEM/DVS.UPLINKPG tag.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key ==
'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))


def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        # Destroy_Task is asynchronous; completion is awaited below
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))


def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Descend from the datacenter's networkFolder into its child entities to
    # reach the vim.Network objects
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items


def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    object_type
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    items = []
    item_list = get_mors_with_properties(service_instance, vim_object,
                                         properties)
    for item in item_list:
        items.append(item['name'])
    return items


def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''

    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager


def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager


def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # The vSphere client displays this label's value as the license name
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    # (queried by its instance uuid instead of a managed object id)
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj


def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse into its
    # child entities
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]


def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the whole cluster configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')


def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)


def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)


def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)


def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores_list = list_objects(service_instance, vim.Datastore)
    datastores = {}
    for datastore in datastores_list:
        datastores[datastore] = list_datastore_full(service_instance,
                                                    datastore)
    return datastores


def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore,
                                       datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # summary sizes divided down by 1024*1024 (presumably bytes -> MiB;
    # confirm against the vSphere API docs)
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key renders as '<type>:<moid>'; keep only the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem,
                                      host_key)
        items['hosts'].append(host_object.name)

    return items


def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see
        get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Linear scan of the container view; returns the first match by name
    for item in container.view:
        if item.name == obj_name:
            return item
    return None


def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see
        get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item._moId == obj_moid:
            return item
    return None


def get_datastore_files(service_instance, directory, datastores,
                        container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object,
                                       datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(datobj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(
                task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory simply contributes nothing
            pass
    return files


def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores to be
        retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)', obj_name, datastore_names,
                  backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # NOTE(review): this extends the caller's list in place --
            # callers passing a shared list will observe the mutation
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or
             i['name'] in datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]


def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']


def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'',
              datastore_name, hostname)
    return ds_ref


def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']


def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the datastore system of the first
    # attached host only
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)


def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts


def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an
    ESXi host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr


def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []


def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    # Join the two maps on the lun key to resolve address -> lun object
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}


def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved.
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref The vim.HostSystem object representing the host that contains the requested disks. cache_disk_ids The list of cannonical names of the cache disks to be retrieved. The canonical name of the cache disk is enough to identify the disk group because it is guaranteed to have one and only one cache disk. Default is None. get_all_disk_groups Specifies whether to retrieve all disks groups in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disk_groups: log.trace('Retrieving all disk groups on host \'%s\'', hostname) else: log.trace('Retrieving disk groups from host \'%s\', with cache disk ' 'ids : (%s)', hostname, cache_disk_ids) if not cache_disk_ids: return [] try: vsan_host_config = host_ref.config.vsanHostConfig except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not vsan_host_config: raise salt.exceptions.VMwareObjectRetrievalError( 'No host config found on host \'{0}\''.format(hostname)) vsan_storage_info = vsan_host_config.storageInfo if not vsan_storage_info: raise salt.exceptions.VMwareObjectRetrievalError( 'No vsan storage info found on host \'{0}\''.format(hostname)) vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] disk_groups = [dm for dm in vsan_disk_mappings if (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace( 'Retrieved disk groups on host \'%s\', with cache disk ids : %s', hostname, [d.ssd.canonicalName for d in disk_groups] ) return disk_groups def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): ''' Checks that the disks in a disk group are as expected and raises CheckError exceptions if the 
check fails ''' if not disk_group.ssd.canonicalName == cache_disk_id: raise salt.exceptions.ArgumentValueError( 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd] if sorted(non_ssd_disks) != sorted(capacity_disk_ids): raise salt.exceptions.ArgumentValueError( 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids))) log.trace('Checked disks in diskgroup with cache disk id \'%s\'', cache_disk_id) return True # TODO Support host caches on multiple datastores def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_network_adapter_type
python
def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.')
Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1011-L1029
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. 
''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance 
object to get hardware info for .. versionadded:: 2016.11.0 ''' hw_grain_data = {} if get_inventory(service_instance).about.apiType == 'HostAgent': view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True) if view and view.view: hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo: if _data.identifierType.key == 'ServiceTag': hw_grain_data['serialnumber'] = _data.identifierValue hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor hw_grain_data['osrelease'] = view.view[0].summary.config.product.version hw_grain_data['osbuild'] = view.view[0].summary.config.product.build hw_grain_data['os_family'] = view.view[0].summary.config.product.name hw_grain_data['os'] = view.view[0].summary.config.product.name hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024 hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y') hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores'] hw_grain_data['ip_interfaces'] = {} hw_grain_data['ip4_interfaces'] = {} hw_grain_data['ip6_interfaces'] = {} hw_grain_data['hwaddr_interfaces'] = {} for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic: hw_grain_data['ip_interfaces'][_vnic.device] = [] 
hw_grain_data['ip4_interfaces'][_vnic.device] = [] hw_grain_data['ip6_interfaces'][_vnic.device] = [] hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress) if _vnic.spec.ip.ipV6Config: hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address) hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName hw_grain_data['fqdn'] = '{0}{1}{2}'.format( view.view[0].configManager.networkSystem.dnsConfig.hostName, ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''), view.view[0].configManager.networkSystem.dnsConfig.domainName) for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic: hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name view = None return hw_grain_data def get_inventory(service_instance): ''' Return the inventory of a Service Instance Object. service_instance The Service Instance Object for which to obtain inventory. ''' return service_instance.RetrieveContent() def get_root_folder(service_instance): ''' Returns the root folder of a vCenter. service_instance The Service Instance Object for which to obtain the root folder. ''' try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_content(service_instance, obj_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns the content of the specified type of object for a Service Instance. For more information, please see: http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html service_instance The Service Instance from which to obtain content. obj_type The type of content to obtain. property_list An optional list of object properties to used to return even more filtered content results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec. local_properties Flag specifying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Start at the rootFolder if container starting point not specified if not container_ref: container_ref = get_root_folder(service_instance) # By default, the object reference used as the starting poing for the filter # is the container_ref passed in the function obj_ref = container_ref local_traversal_spec = False if not traversal_spec and not local_properties: local_traversal_spec = True # We don't have a specific traversal spec override so we are going to # get everything using a container view try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for # collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='traverseEntities', path='view', skip=False, type=vim.view.ContainerView ) # Create property spec to determine properties to be retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec( type=obj_type, all=True if not property_list else False, pathSet=property_list ) # Create object spec to navigate content obj_spec = vmodl.query.PropertyCollector.ObjectSpec( obj=obj_ref, skip=True if not local_properties else False, selectSet=[traversal_spec] if not local_properties else None ) # Create a filter spec and specify object, property spec in it filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[obj_spec], propSet=[property_spec], reportMissingObjectsInResults=False ) # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) except vim.fault.NoPermission as exc: 
log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None): ''' Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. 
''' # Get list of all managed object references with specified property object_list = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref) for obj in object_list: obj_id = six.text_type(obj.get('object', '')).strip('\'"') if obj[property_name] == property_value or property_value == obj_id: return obj['object'] return None def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False): ''' Returns a list containing properties and managed object references for the managed object. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_list An optional list of object properties used to return even more filtered managed object reference results. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder. traversal_spec An optional TraversalSpec to be used instead of the standard ``Traverse All`` spec local_properties Flag specigying whether the properties to be retrieved are local to the container. If that is the case, the traversal spec needs to be None. 
''' # Get all the content content_args = [service_instance, object_type] content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties} try: content = get_content(*content_args, **content_kwargs) except BadStatusLine: content = get_content(*content_args, **content_kwargs) except IOError as exc: if exc.errno != errno.EPIPE: raise exc content = get_content(*content_args, **content_kwargs) object_list = [] for obj in content: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val properties['object'] = obj.obj object_list.append(properties) log.trace('Retrieved %s objects', len(object_list)) return object_list def get_properties_of_managed_object(mo_ref, properties): ''' Returns specific properties of a managed object, retrieved in an optimally. mo_ref The managed object reference. properties List of properties of the managed object to retrieve. ''' service_instance = get_service_instance_from_managed_object(mo_ref) log.trace('Retrieving name of %s', type(mo_ref).__name__) try: items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=['name'], local_properties=True) mo_name = items[0]['name'] except vmodl.query.InvalidProperty: mo_name = '<unnamed>' log.trace('Retrieving properties \'%s\' of %s \'%s\'', properties, type(mo_ref).__name__, mo_name) items = get_mors_with_properties(service_instance, type(mo_ref), container_ref=mo_ref, property_list=properties, local_properties=True) if not items: raise salt.exceptions.VMwareApiError( 'Properties of managed object \'{0}\' weren\'t ' 'retrieved'.format(mo_name)) return items[0] def get_managed_object_name(mo_ref): ''' Returns the name of a managed object. If the name wasn't found, it returns None. mo_ref The managed object reference. 
    '''
    props = get_properties_of_managed_object(mo_ref, ['name'])
    # 'name' may be absent for unnamed objects; .get() then returns None
    return props.get('name')


def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type as a short string (e.g. 'vmxnet3').

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError if the adapter type is not recognized.
    '''
    # NOTE(review): vmxnet2/vmxnet3 are tested before the plain vmxnet type,
    # presumably because they subclass it in pyVmomi — confirm before reordering
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'
    raise ValueError('An unknown network adapter object type.')


def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # DVSs live under the datacenter's network folder; traverse
    # networkFolder -> childEntity to reach them
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items


def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError if the folder wasn't retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']


def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter.
    Blocks until the create task completes; does not return the new DVS.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None (an empty spec is built).
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    # Make sure the name inside the spec matches the requested dvs_name
    dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task finishes
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 
'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. 
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Standard switch networks live under the datacenter's network folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items


def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    items = []
    item_list = get_mors_with_properties(service_instance, vim_object, properties)
    for item in item_list:
        items.append(item['name'])
    return items


def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager


def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareObjectRetrievalError if the manager wasn't retrieved.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager


def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # label key presumably read by the vSphere (VPX) client to display the
    # license description — confirm against vSphere docs before changing
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) if not entity_name: raise salt.exceptions.ArgumentValueError('No entity_name passed') # If entity_ref is not defined, then interested in the vcenter entity_id = None entity_type = 'moid' check_name = False if not entity_ref: if entity_name: check_name = True entity_type = 'uuid' try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: entity_id = entity_ref._moId log.trace('Retrieving licenses assigned to \'%s\'', entity_name) try: assignments = \ license_assignment_manager.QueryAssignedLicenses(entity_id) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if entity_type == 'uuid' and len(assignments) > 1: log.trace('Unexpectectedly retrieved more than one' ' VCenter license assignment.') raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. 
Expect only a single assignment') if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: %s != %s', entity_name, assignments[0].entityDisplayName) raise salt.exceptions.VMwareObjectRetrievalError( 'Got license assignment info for a different vcenter') return [a.assignedLicense for a in assignments] def assign_license(service_instance, license_key, license_name, entity_ref=None, entity_name=None, license_assignment_manager=None): ''' Assigns a license to an entity. service_instance The Service Instance Object from which to obrain the licenses. license_key The key of the license to add. license_name The description of the license to add. entity_ref VMware entity to assign the license to. If None, the entity is the vCenter itself. Default is None. entity_name Entity name used in logging. Default is None. license_assignment_manager The LicenseAssignmentManager object of the service instance. If not provided it will be retrieved Default is None. ''' if not license_assignment_manager: license_assignment_manager = \ get_license_assignment_manager(service_instance) entity_id = None if not entity_ref: # vcenter try: entity_id = service_instance.content.about.instanceUuid except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg) if not entity_name: entity_name = 'vCenter' else: # e.g. vsan cluster or host entity_id = entity_ref._moId log.trace('Assigning license to \'%s\'', entity_name) try: vmware_license = license_assignment_manager.UpdateAssignedLicense( entity_id, license_key, license_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return vmware_license def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. service_instance The Service Instance Object from which to obtain datacenters. ''' return list_objects(service_instance, vim.Datacenter) def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): ''' Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is None. ''' items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)] return items def get_datacenter(service_instance, datacenter_name): ''' Returns a vim.Datacenter managed object. service_instance The Service Instance Object from which to obtain datacenter. datacenter_name The datacenter name ''' items = get_datacenters(service_instance, datacenter_names=[datacenter_name]) if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Datacenter \'{0}\' was not found'.format(datacenter_name)) return items[0] def create_datacenter(service_instance, datacenter_name): ''' Creates a datacenter. .. 
    versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj


def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises VMwareObjectRetrievalError if the cluster wasn't found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return items[0]


def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter. Blocks until the reconfigure task
    completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')


def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)


def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)


def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)


def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores_list = list_objects(service_instance, vim.Datastore)
    datastores = {}
    for datastore in datastores_list:
        datastores[datastore] = list_datastore_full(service_instance, datastore)
    return datastores


def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance,
                                       vim.Datastore,
                                       datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # summary sizes are in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key stringifies like '<type>:<moid>'; keep the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem,
                                      host_key)
        items['hosts'].append(host_object.name)

    return items


def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns None if no matching object is found.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # Recursive container view over the whole inventory (recurse=True)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item
    return None


def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.
    Returns None if no matching object is found.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    # Recursive container view over the whole inventory (recurse=True)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item._moId == obj_moid:
            return item
    return None


def get_datastore_files(service_instance, directory, datastores,
                        container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object,
                                       datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(datobj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(
                task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Directory doesn't exist on this datastore - best-effort skip
            pass
    return files


def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores
    visible from a VMware object, filtered by their names, or the backing
    disk canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)', obj_name, datastore_names,
                  backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # NOTE(review): extend() mutates the caller-supplied datastore_names
        # list in place — confirm callers don't rely on it being unchanged
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like
        # the default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]


def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    # Build the VMFS create spec over the newly computed partition: the
    # datastore extent is the (disk, partition-number) pair computed above.
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'',
              datastore_name, hostname)
    return ds_ref


def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
''' if not hostname: hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.datastoreSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostDatastoreSystem, property_list=['datastore'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' datastore system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved datastore system', hostname) return objs[0]['object'] def remove_datastore(service_instance, datastore_ref): ''' Creates a VMFS datastore from a disk_id service_instance The Service Instance Object containing the datastore datastore_ref The reference to the datastore to remove ''' ds_props = get_properties_of_managed_object( datastore_ref, ['host', 'info', 'name']) ds_name = ds_props['name'] log.debug('Removing datastore \'%s\'', ds_name) ds_hosts = ds_props.get('host') if not ds_hosts: raise salt.exceptions.VMwareApiError( 'Datastore \'{0}\' can\'t be removed. No ' 'attached hosts found'.format(ds_name)) hostname = get_managed_object_name(ds_hosts[0].key) host_ds_system = get_host_datastore_system(ds_hosts[0].key, hostname=hostname) try: host_ds_system.RemoveDatastore(datastore_ref) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)


def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
    if cluster_name:
        # Retrieval to test if cluster exists. Cluster existence only makes
        # sense if the datacenter has been specified
        # ('parent' is needed to check cluster membership below)
        properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue

        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue

        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts


def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an
    ESXi host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device ' 'info was not retrieved'.format(hostname)) multipath_info = device_info.multipathInfo if not multipath_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' multipath info was not retrieved' ''.format(hostname)) if multipath_info.lun is None: raise salt.exceptions.VMwareObjectRetrievalError( 'No luns were retrieved from host \'{0}\''.format(hostname)) lun_key_by_scsi_addr = {} for l in multipath_info.lun: # The vmware scsi_address may have multiple comma separated values # The first one is the actual scsi address lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun for p in l.path}) log.trace('Scsi address to lun id map on host \'%s\': %s', hostname, lun_key_by_scsi_addr) return lun_key_by_scsi_addr def get_all_luns(host_ref, storage_system=None, hostname=None): ''' Returns a list of all vim.HostScsiDisk objects in a disk host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) if not storage_system: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) try: device_info = storage_system.storageDeviceInfo except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if not device_info: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage device info was not retrieved' ''.format(hostname)) scsi_luns = device_info.scsiLun if scsi_luns: log.trace('Retrieved scsi luns in host \'%s\': %s', hostname, [l.canonicalName for l in scsi_luns]) return scsi_luns log.trace('Retrieved no scsi_luns in host \'%s\'', hostname) return [] def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): ''' Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their scsi address host_ref The vim.HostSystem object representing the host that contains the requested disks. storage_system The host's storage system. Default is None. hostname Name of the host. This argument is optional. ''' if not hostname: hostname = get_managed_object_name(host_ref) si = get_service_instance_from_managed_object(host_ref, name=hostname) if not storage_system: storage_system = get_storage_system(si, host_ref, hostname) lun_ids_to_scsi_addr_map = \ _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, get_all_disks=False): ''' Returns a list of vim.HostScsiDisk objects representing disks in a ESXi host, filtered by their cannonical names and scsi_addresses host_ref The vim.HostSystem object representing the host that contains the requested disks. disk_ids The list of canonical names of the disks to be retrieved. Default value is None scsi_addresses The list of scsi addresses of the disks to be retrieved. 
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 
host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Nothing to filter on: no cache disk ids means no disk groups
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups


def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    if not disk_group.ssd.canonicalName == cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Capacity disks are compared as unordered sets of canonical names
    non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd]
    if sorted(non_ssd_disks) != sorted(capacity_disk_ids):
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids)))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True


# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the
    specified host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
'''
    return list_objects(service_instance, vim.VirtualMachine)


def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)


def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)


def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)


def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)


def wait_for_task(task, instance_name, task_type, sleep_seconds=1,
                  log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging
        purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every ``sleep_seconds`` iterations to keep the log quiet
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary relative to start_time,
        # so that each loop iteration counts (roughly) one elapsed second
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so the
        # except-ladder below can translate it to a salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)


def get_vm_by_property(service_instance, name, datacenter=None,
                       vm_properties=None, traversal_spec=None,
                       parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties
    list, returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)
saltstack/salt
salt/utils/vmware.py
get_network_adapter_object_type
python
def get_network_adapter_object_type(adapter_object):
    '''
    Return the network adapter type name for a virtual NIC device object.

    adapter_object
        The vim.vm.device virtual network adapter instance whose type
        string should be determined.
    '''
    # Checked in this exact order on purpose: the Vmxnet2/Vmxnet3 variants
    # are tested before the generic VirtualVmxnet (they appear to be more
    # specific subclasses — preserve the original ordering).
    known_adapter_types = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, type_name in known_adapter_types:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
Returns the network adapter type. adapter_object The adapter object from which to obtain the network adapter type.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1032-L1050
null
# -*- coding: utf-8 -*- ''' Connection library for VMware .. versionadded:: 2015.8.2 This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. :codeauthor: Nitin Madhok <nmadhok@clemson.edu> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com> Dependencies ~~~~~~~~~~~~ - pyVmomi Python Module - ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other functions in this module rely on ESXCLI. pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file was developed against. ESXCLI ------ This dependency is only needed to use the ``esxcli`` function. At the time of this writing, no other functions in this module rely on ESXCLI. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. 
_vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. You should see output related to the ESXi host's syslog configuration. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import logging import time import sys import ssl # Import Salt Libs import salt.exceptions import salt.modules.cmdmod import salt.utils.path import salt.utils.platform import salt.utils.stringutils # Import Third Party Libs from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ SoapStubAdapter from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: import gssapi import base64 HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): ''' Only load if PyVmomi is installed. ''' if HAS_PYVMOMI: return True return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.' def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None): ''' Shell out and call the specified esxcli commmand, parse the result and return something sane. 
:param host: ESXi or vCenter host to connect to :param user: User to connect as, usually root :param pwd: Password to connect with :param port: TCP port :param cmd: esxcli command and arguments :param esxi_host: If `host` is a vCenter host, then esxi_host is the ESXi machine on which to execute this command :param credstore: Optional path to the credential store file :return: Dictionary ''' esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False # Set default port and protocol if none are provided. if port is None: port = 443 if protocol is None: protocol = 'https' if credstore: esx_cmd += ' --credstore \'{0}\''.format(credstore) if not esxi_host: # Then we are connecting directly to an ESXi server, # 'host' points at that server, and esxi_host is a reference to the # ESXi instance we are manipulating esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \ '--protocol={3} --portnumber={4} {5}'.format(host, user, pwd, protocol, port, cmd) else: esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \ '--protocol={4} --portnumber={5} {6}'.format(host, esxi_host, user, pwd, protocol, port, cmd) ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet') return ret def _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain): ''' Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object. 
''' log.trace('Retrieving new service instance') token = None if mechanism == 'userpass': if username is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'username\' is missing') if password is None: raise salt.exceptions.CommandExecutionError( 'Login mechanism userpass was specified but the mandatory ' 'parameter \'password\' is missing') elif mechanism == 'sspi': if principal is not None and domain is not None: try: token = get_gssapi_token(principal, host, domain) except Exception as exc: raise salt.exceptions.VMwareConnectionError(six.text_type(exc)) else: err_msg = 'Login mechanism \'{0}\' was specified but the' \ ' mandatory parameters are missing'.format(mechanism) raise salt.exceptions.CommandExecutionError(err_msg) else: raise salt.exceptions.CommandExecutionError( 'Unsupported mechanism: \'{0}\''.format(mechanism)) try: log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'', mechanism, username) service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, b64token=token, mechanism=mechanism) except TypeError as exc: if 'unexpected keyword argument' in exc.message: log.error('Initial connect to the VMware endpoint failed with %s', exc.message) log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.') log.error('We recommend updating to that version or later.') raise except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. default_msg = 'Could not connect to host \'{0}\'. 
' \ 'Please check the debug log for more information.'.format(host) try: if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \ '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc): service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: # pylint: disable=broad-except # pyVmomi's SmartConnect() actually raises Exception in some cases. if 'certificate verify failed' in six.text_type(exc): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_NONE try: service_instance = SmartConnect( host=host, user=username, pwd=password, protocol=protocol, port=port, sslContext=context, b64token=token, mechanism=mechanism ) except Exception as exc: log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) else: err_msg = exc.msg if hasattr(exc, 'msg') else default_msg log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) atexit.register(Disconnect, service_instance) return service_instance def get_customizationspec_ref(si, customization_spec_name): ''' Get a reference to a VMware customization spec for the purposes of customizing a clone si ServiceInstance for the vSphere or ESXi server (see get_service_instance) customization_spec_name Name of the customization spec ''' customization_spec_name = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name) return customization_spec_name def get_mor_using_container_view(si, obj_type, obj_name): ''' Get reference to an object of specified object 
type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object ''' inventory = get_inventory(si) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True) for item in container.view: if item.name == obj_name: return item return None def get_service_instance(host, username=None, password=None, protocol=None, port=None, mechanism='userpass', principal=None, domain=None): ''' Authenticate with a vCenter server or ESX/ESXi host and return the service instance object. host The location of the vCenter server or ESX/ESXi host. username The username used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` password The password used to login to the vCenter server or ESX/ESXi host. Required if mechanism is ``userpass`` protocol Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the vCenter server or ESX/ESXi host is not using the default port. Default port is ``443``. mechanism pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``. Default mechanism is ``userpass``. principal Kerberos service principal. Required if mechanism is ``sspi`` domain Kerberos user domain. Required if mechanism is ``sspi`` ''' if protocol is None: protocol = 'https' if port is None: port = 443 service_instance = GetSi() if service_instance: stub = GetStub() if (salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, six.text_type(port)]))): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak # and reconnect Disconnect(service_instance) service_instance = None else: return service_instance if not service_instance: service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) # Test if data can actually be retrieved or connection has gone stale log.trace('Checking connection is still authenticated') try: service_instance.CurrentTime() except vim.fault.NotAuthenticated: log.trace('Session no longer authenticating. Reconnecting') Disconnect(service_instance) service_instance = _get_service_instance(host, username, password, protocol, port, mechanism, principal, domain) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance def get_new_service_instance_stub(service_instance, path, ns=None, version=None): ''' Returns a stub that points to a different path, created from an existing connection. service_instance The Service Instance. path Path of the new stub. ns Namespace of the new stub. Default value is None version Version of the new stub. Default value is None. ''' # For python 2.7.9 and later, the default SSL context has more strict # connection handshaking rule. We may need turn off the hostname checking # and the client side cert verification. 
context = None if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE stub = service_instance._stub hostname = stub.host.split(':')[0] session_cookie = stub.cookie.split('"')[1] VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie new_stub = SoapStubAdapter(host=hostname, ns=ns, path=path, version=version, poolSize=0, sslContext=context) new_stub.cookie = stub.cookie return new_stub def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'): ''' Retrieves the service instance from a managed object. me_ref Reference to a managed object (of type vim.ManagedEntity). name Name of managed object. This field is optional. ''' if not name: name = mo_ref.name log.trace('[%s] Retrieving service instance from managed object', name) si = vim.ServiceInstance('ServiceInstance') si._stub = mo_ref._stub return si def disconnect(service_instance): ''' Function that disconnects from the vCenter server or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' log.trace('Disconnecting') try: Disconnect(service_instance) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def is_connection_to_a_vcenter(service_instance): ''' Function that returns True if the connection is made to a vCenter Server and False if the connection is made to an ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: api_type = service_instance.content.about.apiType except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = %s', api_type) if api_type == 'VirtualCenter': return True elif api_type == 'HostAgent': return False else: raise salt.exceptions.VMwareApiError( 'Unexpected api type \'{0}\' . Supported types: ' '\'VirtualCenter/HostAgent\''.format(api_type)) def get_service_info(service_instance): ''' Returns information of the vCenter or ESXi host service_instance The Service Instance from which to obtain managed object references. ''' try: return service_instance.content.about except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. :param service_instance: PyVmomi service instance :param dvs_name: Name of DVS to return :return: A PyVmomi DVS object ''' switches = list_dvs(service_instance) if dvs_name in switches: inventory = get_inventory(service_instance) container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True) for item in container.view: if item.name == dvs_name: return item return None def _get_pnics(host_reference): ''' Helper function that returns a list of PhysicalNics and their information. ''' return host_reference.config.network.pnic def _get_vnics(host_reference): ''' Helper function that returns a list of VirtualNics and their information. 
''' return host_reference.config.network.vnic def _get_vnic_manager(host_reference): ''' Helper function that returns a list of Virtual NicManagers and their information. ''' return host_reference.configManager.virtualNicManager def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def _get_dvs_uplink_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None def get_gssapi_token(principal, host, domain): ''' Get the gssapi token for Kerberos connection principal The service principal host Host url where we would like to authenticate domain Kerberos user domain ''' if not HAS_GSSAPI: raise ImportError('The gssapi library is not imported.') service = '{0}/{1}@{2}'.format(principal, host, domain) log.debug('Retrieving gsspi token for service %s', service) service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME) ctx = gssapi.InitContext(service_name) in_token = None while not ctx.established: out_token = ctx.step(in_token) if out_token: if six.PY2: return base64.b64encode(out_token) return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: raise salt.exceptions.CommandExecutionError( 'Can\'t receive token, no response from server') raise salt.exceptions.CommandExecutionError( 'Context established, but didn\'t receive token') def get_hardware_grains(service_instance): ''' Return hardware info for standard minion grains if the service_instance is a HostAgent type service_instance The service instance 
object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains are only collected when connected directly to an ESXi
    # host (apiType 'HostAgent'), not to a vCenter
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(
            service_instance.RetrieveContent().rootFolder,
            [vim.HostSystem],
            True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is reported in bytes; grains expect MiB
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, with the dot omitted when there is
            # no domain configured
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced, not
        # Destroy()-ed, so the server-side view object may be leaked --
        # confirm against other CreateContainerView callers in this module.
        view = None
    return hw_grain_data


def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    return service_instance.RetrieveContent()


def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    :raises salt.exceptions.VMwareApiError: on permission or API faults.
    :raises salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        # When no explicit property list is given, fetch all properties
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view created above (only if we created it here)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content


def get_mor_by_property(service_instance, object_type, property_value,
                        property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property
    value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object
        reference.

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.
    '''
    # Get list of all managed object references with specified property
    object_list = get_mors_with_properties(service_instance,
                                           object_type,
                                           property_list=[property_name],
                                           container_ref=container_ref)
    for obj in object_list:
        # Also match on the moid embedded in the object's repr, e.g.
        # 'vim.HostSystem:host-12' matched against 'host-12'
        obj_id = six.text_type(obj.get('object', '')).strip('\'"')
        if obj[property_name] == property_value or property_value == obj_id:
            return obj['object']
    return None


def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the
    managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is to
        search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Retry once on a dropped HTTP connection
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Retry once on a broken pipe; re-raise any other IO error
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list


def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    :raises salt.exceptions.VMwareApiError: if the properties couldn't be
        retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    # Retrieve the object's name first, purely for meaningful error messages
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Some managed object types have no 'name' property
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]


def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    props = get_properties_of_managed_object(mo_ref, ['name'])
    return props.get('name')


def get_network_adapter_type(adapter_type):
    '''
    Return the network adapter type.

    adapter_type
        The adapter type from which to obtain the network adapter type.

    :raises ValueError: if the adapter type name is not recognized.
    '''
    if adapter_type == 'vmxnet':
        return vim.vm.device.VirtualVmxnet()
    elif adapter_type == 'vmxnet2':
        return vim.vm.device.VirtualVmxnet2()
    elif adapter_type == 'vmxnet3':
        return vim.vm.device.VirtualVmxnet3()
    elif adapter_type == 'e1000':
        return vim.vm.device.VirtualE1000()
    elif adapter_type == 'e1000e':
        return vim.vm.device.VirtualE1000e()

    raise ValueError('An unknown network adapter object type name.')


def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name, ','.join(dvs_names) if dvs_names else None, get_all_dvss
    )
    properties = ['name']
    # DVSs live under the datacenter's network folder; traverse into its
    # children instead of using the default 'Traverse All' spec
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items


def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter

    dc_ref
        The datacenter reference.

    :raises salt.exceptions.VMwareObjectRetrievalError: if the network folder
        couldn't be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']


def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the server-side task completes (raises on task failure)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))


def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def set_dvs_network_resource_management_enabled(dvs_ref, enabled): ''' Sets whether NIOC is enabled on a DVS. dvs_ref The DVS reference. enabled Flag specifying whether NIOC is enabled. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Setting network resource management enable to %s on ' 'dvs \'%s\'', enabled, dvs_name) try: dvs_ref.EnableNetworkResourceManagement(enable=enabled) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. 
''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items def get_uplink_dvportgroup(dvs_ref): ''' Returns the uplink distributed virtual portgroup of a distributed virtual switch (dvs) dvs_ref The dvs reference ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(dvs_ref) items = [entry['object'] for entry in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=dvs_ref, property_list=['tag'], traversal_spec=traversal_spec) if entry['tag'] and [t for t in entry['tag'] if t.key == 
'SYSTEM/DVS.UPLINKPG']] if not items: raise salt.exceptions.VMwareObjectRetrievalError( 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) return items[0] def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__)) def update_dvportgroup(portgroup_ref, spec): ''' Updates a distributed virtual portgroup portgroup_ref The portgroup reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Updating portgrouo %s', pg_name) try: task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def remove_dvportgroup(portgroup_ref): ''' Removes a distributed virtual portgroup portgroup_ref The portgroup reference ''' pg_name = get_managed_object_name(portgroup_ref) log.trace('Removing portgroup %s', pg_name) try: task = portgroup_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, pg_name, six.text_type(task.__class__)) def get_networks(parent_ref, network_names=None, get_all_networks=False): ''' Returns networks of standard switches. The parent object can be a datacenter. parent_ref The parent object reference. A datacenter object. network_names The name of the standard switch networks. Default is None. get_all_networks Boolean indicates whether to return all networks in the parent. Default is False. 
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items


def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    items = []
    item_list = get_mors_with_properties(service_instance, vim_object, properties)
    for item in item_list:
        items.append(item['name'])
    return items


def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager


def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    :raises salt.exceptions.VMwareObjectRetrievalError: if the license
        assignment manager couldn't be retrieved.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager


def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself. Default is None.

    entity_name
        Entity name used in logging. Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved. Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid lookup) should have exactly one license assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]


def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself. Default is None.

    entity_name
        Entity name used in logging. Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved. Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        # NOTE(review): unlike the other handlers in this module, the two
        # clauses below do not log the exception -- confirm whether that is
        # intentional.
        except vim.fault.VimFault as exc:
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license


def list_datacenters(service_instance):
    '''
    Returns a list of datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)


def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items


def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    :raises salt.exceptions.VMwareObjectRetrievalError: if the datacenter
        was not found.
    '''
    items = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return items[0]


def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj


def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    :raises salt.exceptions.VMwareObjectRetrievalError: if the cluster was
        not found in the datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return items[0]


def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    Raises VMwareApiError/VMwareRuntimeError on vCenter faults; blocks until
    the reconfiguration task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing config instead of
        # replacing it wholesale
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Reconfiguration is asynchronous; wait for the returned task to finish
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')


def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)


def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)


def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)


def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores_list = list_objects(service_instance, vim.Datastore)
    datastores = {}
    # Expand each datastore name into its full information dictionary
    for datastore in datastores_list:
        datastores[datastore] = list_datastore_full(service_instance, datastore)
    return datastores


def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts
    (capacity/free/used are in MiB; usage is a percentage)

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    # Strip stray quotes from the summary strings before returning them
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Summary sizes are in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key renders like "'vim.HostSystem:host-123'"; keep the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items


def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns None if no object with that name exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item
    return None


def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.
    Returns None if no object with that moid exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item._moId == obj_moid:
            return item
    return None


def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object,
                                       datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Missing directory on a given datastore is not fatal: keep
            # collecting results from the remaining datastores
            pass
    return files


def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
Default value is None get_all_datastores Specifies whether to retrieve all disks in the host. Default value is False. ''' obj_name = get_managed_object_name(reference) if get_all_datastores: log.trace('Retrieving all datastores visible to \'%s\'', obj_name) else: log.trace('Retrieving datastores visible to \'%s\': names = (%s); ' 'backing disk ids = (%s)', obj_name, datastore_names, backing_disk_ids) if backing_disk_ids and not isinstance(reference, vim.HostSystem): raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\' when backing disk filter ' 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem log.trace('Filtering datastores with backing disk ids: %s', backing_disk_ids) storage_system = get_storage_system(service_instance, reference, obj_name) props = salt.utils.vmware.get_properties_of_managed_object( storage_system, ['fileSystemVolumeInfo.mountInfo']) mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk for vol in [i.volume for i in mount_infos if isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a # canonical name of interest continue log.trace('Found datastore \'%s\' for disk id(s) \'%s\'', vol.name, [e.diskName for e in vol.extent]) disk_datastores.append(vol.name) log.trace('Datastore found for disk filter: %s', disk_datastores) if datastore_names: datastore_names.extend(disk_datastores) else: datastore_names = disk_datastores if (not get_all_datastores) and (not datastore_names): log.trace('No datastore to be filtered after retrieving the datastores ' 'backed by the disk id(s) \'%s\'', backing_disk_ids) return [] log.trace('datastore_names = %s', datastore_names) # Use the default traversal spec if isinstance(reference, vim.HostSystem): # Create a 
different traversal spec for hosts because it looks like the # default doesn't retrieve the datastores traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='host_datastore_traversal', path='datastore', skip=False, type=vim.HostSystem) elif isinstance(reference, vim.ClusterComputeResource): # Traversal spec for clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='cluster_datastore_traversal', path='datastore', skip=False, type=vim.ClusterComputeResource) elif isinstance(reference, vim.Datacenter): # Traversal spec for datacenter traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datacenter_datastore_traversal', path='datastore', skip=False, type=vim.Datacenter) elif isinstance(reference, vim.StoragePod): # Traversal spec for datastore clusters traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name='datastore_cluster_traversal', path='childEntity', skip=False, type=vim.StoragePod) elif isinstance(reference, vim.Folder) and \ get_managed_object_name(reference) == 'Datacenters': # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, type=vim.Datacenter)], skip=False, type=vim.Folder) else: raise salt.exceptions.ArgumentValueError( 'Unsupported reference type \'{0}\'' ''.format(reference.__class__.__name__)) items = get_mors_with_properties(service_instance, object_type=vim.Datastore, property_list=['name'], container_ref=reference, traversal_spec=traversal_spec) log.trace('Retrieved %s datastores', len(items)) items = [i for i in items if get_all_datastores or i['name'] in datastore_names] log.trace('Filtered datastores: %s', [i['name'] for i in items]) return [i['object'] for i in items] def rename_datastore(datastore_ref, new_datastore_name): ''' Renames a datastore datastore_ref vim.Datastore reference to the 
datastore object to be changed new_datastore_name New datastore name ''' ds_name = get_managed_object_name(datastore_ref) log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name) try: datastore_ref.RenameDatastore(new_datastore_name) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system ''' if not hostname: hostname = get_managed_object_name(host_ref) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) objs = get_mors_with_properties(service_instance, vim.HostStorageSystem, property_list=['systemFile'], container_ref=host_ref, traversal_spec=traversal_spec) if not objs: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' storage system was not retrieved' ''.format(hostname)) log.trace('[%s] Retrieved storage system', hostname) return objs[0]['object'] def _get_partition_info(storage_system, device_path): ''' Returns partition informations for a device path, of type vim.HostDiskPartitionInfo ''' try: partition_infos = \ storage_system.RetrieveDiskPartitionInfo( devicePath=[device_path]) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('partition_info = %s', partition_infos[0]) return partition_infos[0] def _get_new_computed_partition_spec(storage_system, device_path, partition_info): ''' Computes the new disk partition info when adding a new vmfs partition that uses up the remainder of the disk; returns a tuple (new_partition_number, vim.HostDiskPartitionSpec ''' log.trace('Adding a partition at the end of the disk and getting the new ' 'computed partition spec') # TODO implement support for multiple partitions # We support adding a partition add the end of the disk with partitions free_partitions = [p for p in partition_info.layout.partition if p.type == 'none'] if not free_partitions: raise salt.exceptions.VMwareObjectNotFoundError( 'Free partition was not found on device \'{0}\'' ''.format(partition_info.deviceName)) free_partition = free_partitions[0] # Create a layout object that copies the existing one layout = vim.HostDiskPartitionLayout( total=partition_info.layout.total, partition=partition_info.layout.partition) # Create a partition with the free space on the disk # Change the free partition type to vmfs free_partition.type = 'vmfs' try: computed_partition_info = storage_system.ComputeDiskPartitionInfo( devicePath=device_path, partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, layout=layout) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('computed partition info = {0}', computed_partition_info) log.trace('Retrieving new partition number') partition_numbers = [p.partition for p in computed_partition_info.layout.partition if (p.start.block == free_partition.start.block or # XXX If the entire disk is free (i.e. the free # disk partition starts at block 0) the newily # created partition is created from block 1 (free_partition.start.block == 0 and p.start.block == 1)) and p.end.block == free_partition.end.block and p.type == 'vmfs'] if not partition_numbers: raise salt.exceptions.VMwareNotFoundError( 'New partition was not found in computed partitions of device ' '\'{0}\''.format(partition_info.deviceName)) log.trace('new partition number = %s', partition_numbers[0]) return (partition_numbers[0], computed_partition_info.spec) def create_vmfs_datastore(host_ref, datastore_name, disk_ref, vmfs_major_version, storage_system=None): ''' Creates a VMFS datastore from a disk_id host_ref vim.HostSystem object referencing a host to create the datastore on datastore_name Name of the datastore disk_ref vim.HostScsiDislk on which the datastore is created vmfs_major_version VMFS major version to use ''' # TODO Support variable sized partitions hostname = get_managed_object_name(host_ref) disk_id = disk_ref.canonicalName log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', ' 'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version) if not storage_system: si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) target_disk = disk_ref partition_info = _get_partition_info(storage_system, target_disk.devicePath) log.trace('partition_info = %s', partition_info) 
new_partition_number, partition_spec = _get_new_computed_partition_spec( storage_system, target_disk.devicePath, partition_info ) spec = vim.VmfsDatastoreCreateSpec( vmfs=vim.HostVmfsSpec( majorVersion=vmfs_major_version, volumeName=datastore_name, extent=vim.HostScsiDiskPartition( diskName=disk_id, partition=new_partition_number)), diskUuid=target_disk.uuid, partition=partition_spec) try: ds_ref = \ host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname) return ds_ref def get_host_datastore_system(host_ref, hostname=None): ''' Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional. 
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']


def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore (unmounts it via the datastore system of the first
    host it is attached to).

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Removal only needs to be issued from one attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)


def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts


def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr


def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []


def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    # Join the two maps on the lun key
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}


def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved.
Default value is None get_all_disks Specifies whether to retrieve all disks in the host. Default value is False. ''' hostname = get_managed_object_name(host_ref) if get_all_disks: log.trace('Retrieving all disks in host \'%s\'', hostname) else: log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi ' 'addresses = (%s)', hostname, disk_ids, scsi_addresses) if not (disk_ids or scsi_addresses): return [] si = get_service_instance_from_managed_object(host_ref, name=hostname) storage_system = get_storage_system(si, host_ref, hostname) disk_keys = [] if scsi_addresses: # convert the scsi addresses to disk keys lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) disk_keys = [key for scsi_addr, key in six.iteritems(lun_key_by_scsi_addr) if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = %s', disk_keys) scsi_luns = get_all_luns(host_ref, storage_system) scsi_disks = [disk for disk in scsi_luns if isinstance(disk, vim.HostScsiDisk) and ( get_all_disks or # Filter by canonical name (disk_ids and (disk.canonicalName in disk_ids)) or # Filter by disk keys from scsi addresses (disk.key in disk_keys))] log.trace('Retrieved disks in host \'%s\': %s', hostname, [d.canonicalName for d in scsi_disks]) return scsi_disks def get_disk_partition_info(host_ref, disk_id, storage_system=None): ''' Returns all partitions on a disk host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed storage_system The ESXi host's storage system. Default is None. 
''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) props = get_properties_of_managed_object(storage_system, ['storageDeviceInfo.scsiLun']) if not props.get('storageDeviceInfo.scsiLun'): raise salt.exceptions.VMwareObjectRetrievalError( 'No devices were retrieved in host \'{0}\''.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(props['storageDeviceInfo.scsiLun']), ', '.join([l.canonicalName for l in props['storageDeviceInfo.scsiLun']]) ) disks = [l for l in props['storageDeviceInfo.scsiLun'] if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) partition_info = _get_partition_info(storage_system, disks[0].devicePath) log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'', hostname, len(partition_info.spec.partition), disk_id) return partition_info def erase_disk_partitions(service_instance, host_ref, disk_id, hostname=None, storage_system=None): ''' Erases all partitions on a disk in a vcenter filtered by their names and/or datacenter, cluster membership service_instance The Service Instance Object from which to obtain all information host_ref The reference of the ESXi host containing the disk disk_id The canonical name of the disk whose partitions are to be removed hostname The ESXi hostname. Default is None. storage_system The ESXi host's storage system. Default is None. 
''' if not hostname: hostname = get_managed_object_name(host_ref) if not storage_system: storage_system = get_storage_system(service_instance, host_ref, hostname) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.storageSystem', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostStorageSystem, ['storageDeviceInfo.scsiLun'], container_ref=host_ref, traversal_spec=traversal_spec) if not results: raise salt.exceptions.VMwareObjectRetrievalError( 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) log.trace( '[%s] Retrieved %s devices: %s', hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), ', '.join([l.canonicalName for l in results[0].get('storageDeviceInfo.scsiLun', [])]) ) disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) if isinstance(l, vim.HostScsiDisk) and l.canonicalName == disk_id] if not disks: raise salt.exceptions.VMwareObjectRetrievalError( 'Disk \'{0}\' was not found in host \'{1}\'' ''.format(disk_id, hostname)) log.trace('[%s] device_path = %s', hostname, disks[0].devicePath) # Erase the partitions by setting an empty partition spec try: storage_system.UpdateDiskPartitions(disks[0].devicePath, vim.HostDiskPartitionSpec()) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id) def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): ''' Returns a list of vim.VsanHostDiskMapping objects representing disks in a ESXi host, filtered by their cannonical names. 

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No filter given, so nothing can match
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups


def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    if not disk_group.ssd.canonicalName == cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd]
    # Order-insensitive comparison of the capacity disk sets
    if sorted(non_ssd_disks) != sorted(capacity_disk_ids):
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids)))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True


# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None.
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0] # TODO Support host caches on multiple datastores def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None): ''' Configures the host cahe of the specified host host_ref The vim.HostSystem object representing the host that contains the requested disks. datastore_ref The vim.Datastore opject representing the datastore the host cache will be configured on. swap_size_MiB The size in Mibibytes of the swap. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. 
If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) if not host_cache_manager: props = get_properties_of_managed_object( host_ref, ['configManager.cacheConfigurationManager']) if not props.get('configManager.cacheConfigurationManager'): raise salt.exceptions.VMwareObjectRetrievalError( 'Host \'{0}\' has no host cache'.format(hostname)) host_cache_manager = props['configManager.cacheConfigurationManager'] log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', ' 'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB) spec = vim.HostCacheConfigurationSpec( datastore=datastore_ref, swapSize=swap_size_MiB) log.trace('host_cache_spec=%s', spec) try: task = host_cache_manager.ConfigureHostCache_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, hostname, 'HostCacheConfigurationTask') log.trace('Configured host cache on host \'%s\'', hostname) return True def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. service_instance The Service Instance Object from which to obtain hosts. 
''' return list_objects(service_instance, vim.HostSystem) def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None, get_all_resource_pools=False): ''' Retrieves resource pool objects service_instance The service instance object to query the vCenter resource_pool_names Resource pool names datacenter_name Name of the datacenter where the resource pool is available get_all_resource_pools Boolean return Resourcepool managed object reference ''' properties = ['name'] if not resource_pool_names: resource_pool_names = [] if datacenter_name: container_ref = get_datacenter(service_instance, datacenter_name) else: container_ref = get_root_folder(service_instance) resource_pools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=container_ref, property_list=properties) selected_pools = [] for pool in resource_pools: if get_all_resource_pools or (pool['name'] in resource_pool_names): selected_pools.append(pool['object']) if not selected_pools: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pools with properties ' 'names={} get_all={} could not be found'.format(selected_pools, get_all_resource_pools)) return selected_pools def list_resourcepools(service_instance): ''' Returns a list of resource pools associated with a given service instance. service_instance The Service Instance Object from which to obtain resource pools. ''' return list_objects(service_instance, vim.ResourcePool) def list_networks(service_instance): ''' Returns a list of networks associated with a given service instance. service_instance The Service Instance Object from which to obtain networks. ''' return list_objects(service_instance, vim.Network) def list_vms(service_instance): ''' Returns a list of VMs associated with a given service instance. service_instance The Service Instance Object from which to obtain VMs. 
''' return list_objects(service_instance, vim.VirtualMachine) def list_folders(service_instance): ''' Returns a list of folders associated with a given service instance. service_instance The Service Instance Object from which to obtain folders. ''' return list_objects(service_instance, vim.Folder) def list_dvs(service_instance): ''' Returns a list of distributed virtual switches associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.DistributedVirtualSwitch) def list_vapps(service_instance): ''' Returns a list of vApps associated with a given service instance. service_instance The Service Instance Object from which to obtain vApps. ''' return list_objects(service_instance, vim.VirtualApp) def list_portgroups(service_instance): ''' Returns a list of distributed virtual portgroups associated with a given service instance. service_instance The Service Instance Object from which to obtain distributed virtual switches. ''' return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup) def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message) def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None, traversal_spec=None, parent_ref=None): ''' Get virtual machine properties based on the traversal specs and properties list, returns Virtual Machine object with properties. service_instance Service instance object to access vCenter name Name of the virtual machine. datacenter Datacenter name vm_properties List of vm properties. traversal_spec Traversal Spec object(s) for searching. parent_ref Container Reference object for searching under a given object. 
''' if datacenter and not parent_ref: parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) if not vm_properties: vm_properties = ['name', 'config.hardware.device', 'summary.storage.committed', 'summary.storage.uncommitted', 'summary.storage.unshared', 'layoutEx.file', 'config.guestFullName', 'config.guestId', 'guest.net', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'config.files.vmPathName', 'summary.runtime.powerState', 'guest.toolsStatus'] vm_list = salt.utils.vmware.get_mors_with_properties(service_instance, vim.VirtualMachine, vm_properties, container_ref=parent_ref, traversal_spec=traversal_spec) vm_formatted = [vm for vm in vm_list if vm['name'] == name] if not vm_formatted: raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.') elif len(vm_formatted) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple virtual machines were found with the' 'same name, please specify a container.'])) return vm_formatted[0] def get_folder(service_instance, datacenter, placement, base_vm_name=None): ''' Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning) ''' log.trace('Retrieving folder information') if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name']) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent']) if 'parent' in vm_props: folder_object = vm_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The virtual machine parent', 'object is not defined'])) elif 'folder' in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified folder 
{0}'.format(placement['folder'])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder']) if 'vmFolder' in dc_props: folder_object = dc_props['vmFolder'] else: raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined') return folder_object def get_placement(service_instance, datacenter, placement=None): ''' To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible. datacenter Name of the datacenter placement Dictionary with the placement info, cluster, host resource pool name return Resource pool, cluster and host object if any applies ''' log.trace('Retrieving placement information') resourcepool_object, placement_object = None, None if 'host' in placement: host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']]) if not host_objects: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The specified host', '{0} cannot be found.'.format(placement['host'])])) try: host_props = \ get_properties_of_managed_object(host_objects[0], properties=['resourcePool']) resourcepool_object = host_props['resourcePool'] except vmodl.query.InvalidProperty: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='parent', skip=True, type=vim.HostSystem, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='resourcePool', skip=False, type=vim.ClusterComputeResource)]) resourcepools = get_mors_with_properties(service_instance, vim.ResourcePool, container_ref=host_objects[0], property_list=['name'], traversal_spec=traversal_spec) if resourcepools: resourcepool_object = resourcepools[0]['object'] else: raise salt.exceptions.VMwareObjectRetrievalError( 'The resource pool of host {0} cannot be found.'.format(placement['host'])) 
placement_object = host_objects[0] elif 'resourcepool' in placement: resourcepool_objects = get_resource_pools(service_instance, [placement['resourcepool']], datacenter_name=datacenter) if len(resourcepool_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(' '.join([ 'Multiple instances are available of the', 'specified host {}.'.format(placement['host'])])) resourcepool_object = resourcepool_objects[0] res_props = get_properties_of_managed_object(resourcepool_object, properties=['parent']) if 'parent' in res_props: placement_object = res_props['parent'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The resource pool\'s parent', 'object is not defined'])) elif 'cluster' in placement: datacenter_object = get_datacenter(service_instance, datacenter) cluster_object = get_cluster(datacenter_object, placement['cluster']) clus_props = get_properties_of_managed_object(cluster_object, properties=['resourcePool']) if 'resourcePool' in clus_props: resourcepool_object = clus_props['resourcePool'] else: raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'The cluster\'s resource pool', 'object is not defined'])) placement_object = cluster_object else: # We are checking the schema for this object, this exception should never be raised raise salt.exceptions.VMwareObjectRetrievalError(' '.join([ 'Placement is not defined.'])) return (resourcepool_object, placement_object) def convert_to_kb(unit, size): ''' Converts the given size to KB based on the unit, returns a long integer. unit Unit of the size eg. 
GB; Note: to VMware a GB is the same as GiB = 1024MiB size Number which represents the size ''' if unit.lower() == 'gb': # vCenter needs long value target_size = int(size * 1024 * 1024) elif unit.lower() == 'mb': target_size = int(size * 1024) elif unit.lower() == 'kb': target_size = int(size) else: raise salt.exceptions.ArgumentValueError('The unit is not specified') return {'size': target_size, 'unit': 'KB'} def power_cycle_vm(virtual_machine, action='on'): ''' Powers on/off a virtual machine specified by it's name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ''' if action == 'on': try: task = virtual_machine.PowerOn() task_name = 'power on' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) elif action == 'off': try: task = virtual_machine.PowerOff() task_name = 'power off' except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) else: raise salt.exceptions.ArgumentValueError('The given action is not supported') try: wait_for_task(task, get_managed_object_name(virtual_machine), task_name) except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwarePowerOnError(' '.join([ 'An error occurred during power', 'operation, a file was not found: {0}'.format(exc)])) return virtual_machine def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None): ''' Creates virtual machine from config spec vm_name Virtual machine name to be created vm_config_spec Virtual Machine Config Spec object folder_object vm Folder managed object reference resourcepool_object Resource pool object where the machine will be created host_object Host object where the machine will ne placed (optional) return Virtual Machine managed object reference ''' try: if host_object and isinstance(host_object, vim.HostSystem): task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object, host=host_object) else: task = folder_object.CreateVM_Task(vm_config_spec, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info') return vm_object def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None): ''' Registers a virtual machine to the inventory with the given vmx file, on success it returns the vim.VirtualMachine managed object reference datacenter Datacenter object of the virtual machine, vim.Datacenter object name Name of the virtual machine vmx_path: Full path to the vmx file, datastore name should be included resourcepool Placement resource pool of the virtual machine, vim.ResourcePool object host Placement host of the virtual machine, vim.HostSystem object ''' try: if host_object: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, host=host_object, pool=resourcepool_object) else: task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name, asTemplate=False, pool=resourcepool_object) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) try: vm_ref = wait_for_task(task, name, 'RegisterVM Task') except salt.exceptions.VMwareFileNotFoundError as exc: raise salt.exceptions.VMwareVmRegisterError( 'An error occurred during registration operation, the ' 'configuration file was not found: {0}'.format(exc)) return vm_ref def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref def delete_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: task = vm_ref.Destroy_Task() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. 
Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, vm_name, 'Destroy Task') def unregister_vm(vm_ref): ''' Destroys the virtual machine vm_ref Managed object reference of a virtual machine object ''' vm_name = get_managed_object_name(vm_ref) log.trace('Destroying vm \'%s\'', vm_name) try: vm_ref.UnregisterVM() except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: raise salt.exceptions.VMwareRuntimeError(exc.msg)